 }


-def inception_v3(
-    pretrained: bool = False,
-    progress: bool = True,
-    quantize: bool = False,
-    **kwargs: Any,
-) -> "QuantizableInception3":
-
-    r"""Inception v3 model architecture from
-    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
-
-    .. note::
-        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
-        N x 3 x 299 x 299, so ensure your images are sized accordingly.
-
-    Note that quantize = True returns a quantized model with 8 bit
-    weights. Quantized models only support inference and run on CPUs.
-    GPU inference is not yet supported
-
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-        progress (bool): If True, displays a progress bar of the download to stderr
-        quantize (bool): If True, return a quantized version of the model
-        aux_logits (bool): If True, add an auxiliary branch that can improve training.
-            Default: *True*
-        transform_input (bool): If True, preprocesses the input according to the method with which it
-            was trained on ImageNet. Default: *False*
-    """
-    if pretrained:
-        if "transform_input" not in kwargs:
-            kwargs["transform_input"] = True
-        if "aux_logits" in kwargs:
-            original_aux_logits = kwargs["aux_logits"]
-            kwargs["aux_logits"] = True
-        else:
-            original_aux_logits = False
-
-    model = QuantizableInception3(**kwargs)
-    _replace_relu(model)
-
-    if quantize:
-        # TODO use pretrained as a string to specify the backend
-        backend = "fbgemm"
-        quantize_model(model, backend)
-    else:
-        assert pretrained in [True, False]
-
-    if pretrained:
-        if quantize:
-            if not original_aux_logits:
-                model.aux_logits = False
-                model.AuxLogits = None
-            model_url = quant_model_urls["inception_v3_google_" + backend]
-        else:
-            model_url = inception_module.model_urls["inception_v3_google"]
-
-        state_dict = load_state_dict_from_url(model_url, progress=progress)
-
-        model.load_state_dict(state_dict)
-
-        if not quantize:
-            if not original_aux_logits:
-                model.aux_logits = False
-                model.AuxLogits = None
-    return model
-
-
 class QuantizableBasicConv2d(inception_module.BasicConv2d):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)
@@ -237,3 +171,68 @@ def fuse_model(self) -> None:
         for m in self.modules():
             if type(m) is QuantizableBasicConv2d:
                 m.fuse_model()
+
+
+def inception_v3(
+    pretrained: bool = False,
+    progress: bool = True,
+    quantize: bool = False,
+    **kwargs: Any,
+) -> QuantizableInception3:
+    r"""Inception v3 model architecture from
+    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
+
+    .. note::
+        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
+        N x 3 x 299 x 299, so ensure your images are sized accordingly.
+
+    Note that quantize = True returns a quantized model with 8 bit
+    weights. Quantized models only support inference and run on CPUs.
+    GPU inference is not yet supported
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+        quantize (bool): If True, return a quantized version of the model
+        aux_logits (bool): If True, add an auxiliary branch that can improve training.
+            Default: *True*
+        transform_input (bool): If True, preprocesses the input according to the method with which it
+            was trained on ImageNet. Default: *False*
+    """
+    if pretrained:
+        if "transform_input" not in kwargs:
+            kwargs["transform_input"] = True
+        if "aux_logits" in kwargs:
+            original_aux_logits = kwargs["aux_logits"]
+            kwargs["aux_logits"] = True
+        else:
+            original_aux_logits = False
+
+    model = QuantizableInception3(**kwargs)
+    _replace_relu(model)
+
+    if quantize:
+        # TODO use pretrained as a string to specify the backend
+        backend = "fbgemm"
+        quantize_model(model, backend)
+    else:
+        assert pretrained in [True, False]
+
+    if pretrained:
+        if quantize:
+            if not original_aux_logits:
+                model.aux_logits = False
+                model.AuxLogits = None
+            model_url = quant_model_urls["inception_v3_google_" + backend]
+        else:
+            model_url = inception_module.model_urls["inception_v3_google"]
+
+        state_dict = load_state_dict_from_url(model_url, progress=progress)
+
+        model.load_state_dict(state_dict)
+
+        if not quantize:
+            if not original_aux_logits:
+                model.aux_logits = False
+                model.AuxLogits = None
+    return model
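For orientation, here is a minimal usage sketch of the relocated factory. The import path torchvision.models.quantization and the 1000-class output are assumptions based on torchvision's defaults, not part of this diff:

import torch
from torchvision.models.quantization import inception_v3

# quantize=True loads the int8 fbgemm weights; quantized models are CPU-only.
model = inception_v3(pretrained=True, quantize=True)
model.eval()

# Per the docstring, inception_v3 expects N x 3 x 299 x 299 inputs.
x = torch.rand(1, 3, 299, 299)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000]) with the default num_classes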
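The quantize_model(model, backend) call is a torchvision helper, not defined in this diff. As a rough sketch of the eager-mode post-training quantization flow it presumably wraps (the name quantize_model_sketch and the default qconfig are illustrative assumptions; the real helper may pin a qconfig matching the serialized weights):

import torch

def quantize_model_sketch(model: torch.nn.Module, backend: str = "fbgemm") -> None:
    # Hypothetical stand-in for torchvision's quantize_model helper.
    torch.backends.quantized.engine = backend  # fbgemm targets x86 CPUs
    model.eval()
    # Fold Conv/BN/ReLU triples; QuantizableBasicConv2d.fuse_model() in this
    # file performs that per-block fusion.
    model.fuse_model()
    # Assumption: default qconfig; the shipped weights may use a different one.
    model.qconfig = torch.quantization.get_default_qconfig(backend)
    torch.quantization.prepare(model, inplace=True)
    model(torch.rand(1, 3, 299, 299))  # one calibration pass on dummy data
    torch.quantization.convert(model, inplace=True)  # swap in int8 modules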