@@ -26,43 +26,6 @@
 _InceptionOutputs = InceptionOutputs
 
 
-def inception_v3(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "Inception3":
-    r"""Inception v3 model architecture from
-    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
-    The required minimum input size of the model is 75x75.
-
-    .. note::
-        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
-        N x 3 x 299 x 299, so ensure your images are sized accordingly.
-
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-        progress (bool): If True, displays a progress bar of the download to stderr
-        aux_logits (bool): If True, add an auxiliary branch that can improve training.
-            Default: *True*
-        transform_input (bool): If True, preprocesses the input according to the method with which it
-            was trained on ImageNet. Default: *False*
-    """
-    if pretrained:
-        if "transform_input" not in kwargs:
-            kwargs["transform_input"] = True
-        if "aux_logits" in kwargs:
-            original_aux_logits = kwargs["aux_logits"]
-            kwargs["aux_logits"] = True
-        else:
-            original_aux_logits = True
-        kwargs["init_weights"] = False  # we are loading weights from a pretrained model
-        model = Inception3(**kwargs)
-        state_dict = load_state_dict_from_url(model_urls["inception_v3_google"], progress=progress)
-        model.load_state_dict(state_dict)
-        if not original_aux_logits:
-            model.aux_logits = False
-            model.AuxLogits = None
-        return model
-
-    return Inception3(**kwargs)
-
-
 class Inception3(nn.Module):
     def __init__(
         self,
@@ -442,3 +405,40 @@ def forward(self, x: Tensor) -> Tensor:
         x = self.conv(x)
         x = self.bn(x)
         return F.relu(x, inplace=True)
+
+
+def inception_v3(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> Inception3:
+    r"""Inception v3 model architecture from
+    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
+    The required minimum input size of the model is 75x75.
+
+    .. note::
+        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
+        N x 3 x 299 x 299, so ensure your images are sized accordingly.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+        aux_logits (bool): If True, add an auxiliary branch that can improve training.
+            Default: *True*
+        transform_input (bool): If True, preprocesses the input according to the method with which it
+            was trained on ImageNet. Default: *False*
+    """
+    if pretrained:
+        if "transform_input" not in kwargs:
+            kwargs["transform_input"] = True
+        if "aux_logits" in kwargs:
+            original_aux_logits = kwargs["aux_logits"]
+            kwargs["aux_logits"] = True
+        else:
+            original_aux_logits = True
+        kwargs["init_weights"] = False  # we are loading weights from a pretrained model
+        model = Inception3(**kwargs)
+        state_dict = load_state_dict_from_url(model_urls["inception_v3_google"], progress=progress)
+        model.load_state_dict(state_dict)
+        if not original_aux_logits:
+            model.aux_logits = False
+            model.AuxLogits = None
+        return model
+
+    return Inception3(**kwargs)
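
The hunk pair above is a straight move: `inception_v3` now lives below the `Inception3` class, which lets the return annotation reference the class directly (`-> Inception3`) instead of going through the string forward reference `-> "Inception3"`. The builder's behaviour is unchanged. A minimal usage sketch, assuming this file is torchvision's `torchvision/models/inception.py` and the usual public import path that goes with it:

import torch
from torchvision.models import inception_v3

# Download the ImageNet checkpoint; per the builder above, transform_input
# defaults to True here and init_weights is forced off since weights are loaded.
model = inception_v3(pretrained=True, progress=True)
model.eval()

# The model expects N x 3 x 299 x 299 input (hard minimum 75x75).
x = torch.randn(1, 3, 299, 299)
with torch.no_grad():
    out = model(x)
print(out.shape)  # torch.Size([1, 1000])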
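One subtlety in the pretrained branch is worth spelling out: the `inception_v3_google` checkpoint was trained with the auxiliary classifier, so the builder forces `aux_logits=True` while `load_state_dict` runs and only detaches the branch afterwards when the caller opted out. A short sketch of that contract, under the same torchvision assumption as above:

# Requesting no auxiliary head still loads the full checkpoint first;
# the branch is stripped after loading, per the code in the diff.
model = inception_v3(pretrained=True, aux_logits=False)
assert model.aux_logits is False
assert model.AuxLogits is None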