
Commit a73f03c

prabhat00155 authored and facebook-github-bot committed
[fbsync] Expose misc ops at package level (#4812)
Summary:
* Expose misc ops at package level
* Adding documentation to the ops exposed

Reviewed By: kazhang

Differential Revision: D32216670

fbshipit-source-id: 63d39b4e9b07020b29a399d0a1ae7bbb5f26bdbf
1 parent 1613b49 commit a73f03c
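
In practical terms, the three blocks defined in torchvision/ops/misc.py become importable straight from the torchvision.ops package. A minimal sketch of the import surface before and after this commit (assuming a build that includes it):

    # Before: the blocks lived only in the submodule
    from torchvision.ops.misc import FrozenBatchNorm2d

    # After: they are re-exported at package level and listed in __all__
    from torchvision.ops import FrozenBatchNorm2d, ConvNormActivation, SqueezeExcitation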

File tree

3 files changed: +40 −12 lines


docs/source/ops.rst

Lines changed: 3 additions & 0 deletions
@@ -43,3 +43,6 @@ Operators
     MultiScaleRoIAlign
     FeaturePyramidNetwork
     StochasticDepth
+    FrozenBatchNorm2d
+    ConvNormActivation
+    SqueezeExcitation

torchvision/ops/__init__.py

Lines changed: 4 additions & 0 deletions
@@ -13,6 +13,7 @@
 from .deform_conv import deform_conv2d, DeformConv2d
 from .feature_pyramid_network import FeaturePyramidNetwork
 from .focal_loss import sigmoid_focal_loss
+from .misc import FrozenBatchNorm2d, ConvNormActivation, SqueezeExcitation
 from .poolers import MultiScaleRoIAlign
 from .ps_roi_align import ps_roi_align, PSRoIAlign
 from .ps_roi_pool import ps_roi_pool, PSRoIPool
@@ -48,4 +49,7 @@
     "sigmoid_focal_loss",
     "stochastic_depth",
     "StochasticDepth",
+    "FrozenBatchNorm2d",
+    "ConvNormActivation",
+    "SqueezeExcitation",
 ]

torchvision/ops/misc.py

Lines changed: 33 additions & 12 deletions
@@ -1,13 +1,3 @@
-"""
-helper class that supports empty tensors on some nn functions.
-
-Ideally, add support directly in PyTorch to empty tensors in
-those functions.
-
-This can be removed once https://github.com/pytorch/pytorch/issues/12013
-is implemented
-"""
-
 import warnings
 from typing import Callable, List, Optional
 

@@ -53,8 +43,11 @@ def __init__(self, *args, **kwargs):
 # This is not in nn
 class FrozenBatchNorm2d(torch.nn.Module):
     """
-    BatchNorm2d where the batch statistics and the affine parameters
-    are fixed
+    BatchNorm2d where the batch statistics and the affine parameters are fixed
+
+    Args:
+        num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)``
+        eps (float): a value added to the denominator for numerical stability. Default: 1e-5
     """
 
     def __init__(
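
A short usage sketch for the newly documented block; the constructor arguments mirror the Args section above, and the shapes are illustrative only:

    import torch
    from torchvision.ops import FrozenBatchNorm2d

    # Batch statistics and affine parameters are registered as buffers,
    # so the module exposes no trainable parameters.
    bn = FrozenBatchNorm2d(num_features=64)
    x = torch.randn(8, 64, 32, 32)  # (N, C, H, W) with C == num_features
    print(bn(x).shape)              # torch.Size([8, 64, 32, 32])
    print(list(bn.parameters()))    # [] -- nothing to train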
@@ -109,6 +102,23 @@ def __repr__(self) -> str:
 
 
 class ConvNormActivation(torch.nn.Sequential):
+    """
+    Configurable block used for Convolution-Normalization-Activation blocks.
+
+    Args:
+        in_channels (int): Number of channels in the input image
+        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
+        kernel_size (int, optional): Size of the convolving kernel. Default: 3
+        stride (int, optional): Stride of the convolution. Default: 1
+        padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
+        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm2d``
+        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
+        dilation (int): Spacing between kernel elements. Default: 1
+        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default: ``True``
+
+    """
+
     def __init__(
         self,
         in_channels: int,
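
A brief sketch of the block in use; with the defaults it stacks a 3x3 convolution, BatchNorm2d, and ReLU, and the auto-computed padding ((3 - 1) // 2 * 1 = 1) preserves spatial size at stride 1. Shapes are illustrative:

    import torch
    from torchvision.ops import ConvNormActivation

    # Conv2d(3, 16, kernel_size=3, padding=1) -> BatchNorm2d(16) -> ReLU(inplace=True)
    block = ConvNormActivation(in_channels=3, out_channels=16, kernel_size=3)
    x = torch.randn(1, 3, 224, 224)
    print(block(x).shape)  # torch.Size([1, 16, 224, 224])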
@@ -146,6 +156,17 @@ def __init__(
 
 
 class SqueezeExcitation(torch.nn.Module):
+    """
+    This block implements the Squeeze-and-Excitation block from https://arxiv.org/abs/1709.01507 (see Fig. 1).
+    Parameters ``activation`` and ``scale_activation`` correspond to ``delta`` and ``sigma`` in eq. 3.
+
+    Args:
+        input_channels (int): Number of channels in the input image
+        squeeze_channels (int): Number of squeeze channels
+        activation (Callable[..., torch.nn.Module], optional): ``delta`` activation. Default: ``torch.nn.ReLU``
+        scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid``
+    """
+
     def __init__(
         self,
         input_channels: int,
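
A sketch of the SE block in use; it computes per-channel scales and multiplies them back onto the input, so the output shape matches the input. The squeeze ratio below is an illustrative choice, not something this commit prescribes:

    import torch
    from torchvision.ops import SqueezeExcitation

    # Squeeze 64 channels down to 4 (reduction ratio 16, as in the paper's examples)
    se = SqueezeExcitation(input_channels=64, squeeze_channels=4)
    x = torch.randn(2, 64, 56, 56)
    print(se(x).shape)  # torch.Size([2, 64, 56, 56]) -- channel-wise rescaling only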

0 commit comments
