Commit 1e6c8c9

XuehaiPan authored and pull[bot] committed
[BE][Easy][14/19] enforce style for empty lines in import segments in torch/_[a-c]*/ and torch/_[e-h]*/ and torch/_[j-z]*/ (pytorch#129765)
See pytorch#129751 (comment). Most changes are auto-generated by linter. You can review these PRs via:

```bash
git diff --ignore-all-space --ignore-blank-lines HEAD~1
```

Pull Request resolved: pytorch#129765
Approved by: https://github.com/ezyang
1 parent 1d731aa commit 1e6c8c9
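
For readers skimming the diffs below, here is a minimal sketch of the blank-line style this commit enforces. The module contents are hypothetical (the file and logger names are illustrative, not taken from this commit); the pattern itself is the one visible in the hunks: no blank lines inside an import segment, and two blank lines between the import block and the first module-level statement.

```python
# Illustrative module only -- symbols are hypothetical, not PyTorch sources.

# Before (old style): a stray blank line inside the third-party import
# segment and only one blank line before the first module-level statement:
#
#     import logging
#     import os
#
#     import torch
#
#     from torch.export import ExportedProgram
#
#     log = logging.getLogger(__name__)

# After (enforced style): each import segment is contiguous, segments are
# separated by a single blank line, and two blank lines separate the import
# block from the code that follows.
import logging
import os

import torch
from torch.export import ExportedProgram


log = logging.getLogger(__name__)
```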

File tree: 98 files changed, +193 -169 lines changed


tools/linter/adapters/ufmt_linter.py

Lines changed: 0 additions & 4 deletions
```diff
@@ -48,16 +48,12 @@
     # test/[q-z]*/**
     "test/[q-z]*/**",
     # torch/**
-    "torch/**",
     # torch/_[a-c]*/**
-    "torch/_[a-c]*/**",
     # torch/_d*/**
     "torch/_d*/**",
     # torch/_[e-h]*/**
-    "torch/_[e-h]*/**",
     # torch/_i*/**
     # torch/_[j-z]*/**
-    "torch/_[j-z]*/**",
     # torch/[a-c]*/**
     "torch/[a-c]*/**",
     # torch/d*/**
```

torch/__init__.py

Lines changed: 16 additions & 2 deletions
```diff
@@ -36,6 +36,7 @@
 )
 from typing_extensions import ParamSpec as _ParamSpec, TypeGuard as _TypeGuard

+
 if TYPE_CHECKING:
     from .types import IntLikeType

@@ -59,6 +60,7 @@ def _running_with_deploy() -> builtins.bool:
     USE_RTLD_GLOBAL_WITH_LIBTORCH,
 )

+
 # TODO(torch_deploy) figure out how to freeze version.py in fbcode build
 if _running_with_deploy():
     __version__ = "torch-deploy-1.8"
@@ -915,6 +917,7 @@ def sym_ite(b, t, f):
 # Make an explicit reference to the _C submodule to appease linters
 from torch import _C as _C

+
 __name, __obj = "", None
 for __name in dir(_C):
     if __name[0] != "_" and not __name.endswith("Base"):
@@ -1680,6 +1683,7 @@ def _check_tensor_all(cond, message=None):  # noqa: F811
 # NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
 from math import e, inf, nan, pi

+
 newaxis: None = None

 __all__.extend(["e", "pi", "nan", "inf", "newaxis"])
@@ -1922,6 +1926,7 @@ def _dtype(self):
 from torch.random import get_rng_state, initial_seed, manual_seed, seed, set_rng_state
 from torch.serialization import load, save

+
 ################################################################################
 # Initialize extension
 ################################################################################
@@ -1983,6 +1988,7 @@ def _manager_path():

 import torch

+
 __all__.extend(
     name for name in dir(torch) if isinstance(getattr(torch, name), torch.dtype)
 )
@@ -2074,6 +2080,7 @@ def _assert(condition, message):
 )
 from torch.signal import windows as windows

+
 # Quantized, sparse, AO, etc. should be last to get imported, as nothing
 # is expected to depend on them.
 from torch import ao as ao  # usort: skip
@@ -2084,11 +2091,13 @@ def _assert(condition, message):
 import torch.nn.quantizable
 import torch.nn.quantized

+
 _C._init_names(list(_storage_classes))

 # attach docstrings to torch and tensor functions
 from torch import _size_docs, _storage_docs, _tensor_docs, _torch_docs

+
 del _torch_docs, _tensor_docs, _storage_docs, _size_docs


@@ -2098,9 +2107,10 @@ def compiled_with_cxx11_abi() -> builtins.bool:


 from torch import _library as _library, _ops as _ops
+from torch._classes import classes as classes
+

 # Import the ops "namespace"
-from torch._classes import classes as classes
 from torch._ops import ops as ops  # usort: skip

 # quantization depends on torch.fx and torch.ops
@@ -2118,13 +2128,15 @@ def compiled_with_cxx11_abi() -> builtins.bool:
 # Register fork handler to initialize OpenMP in child processes (see gh-28389)
 from torch.multiprocessing._atfork import register_after_fork

+
 register_after_fork(torch.get_num_threads)
 del register_after_fork

 # Import tools that require fully imported torch (for applying
 # torch.jit.script as a decorator, for instance):
 from torch._lobpcg import lobpcg as lobpcg

+
 # These were previously defined in native_functions.yaml and appeared on the
 # `torch` namespace, but we moved them to c10 dispatch to facilitate custom
 # class usage. We add these lines here to preserve backward compatibility.
@@ -2144,7 +2156,6 @@ def compiled_with_cxx11_abi() -> builtins.bool:
     matrix_rank,
     solve,
 )
-
 from torch.utils.dlpack import from_dlpack, to_dlpack


@@ -2466,6 +2477,7 @@ def _register_device_module(device_type, module):
 from torch._higher_order_ops import cond as cond, while_loop as while_loop
 from torch.func import vmap as vmap

+
 if not TYPE_CHECKING:
     from torch import _meta_registrations

@@ -2478,6 +2490,7 @@ def _register_device_module(device_type, module):
 # Populate magic methods on SymInt and SymFloat
 import torch.fx.experimental.sym_node

+
 # Register MPS specific decomps
 torch.backends.mps._init()

@@ -2592,6 +2605,7 @@ def _constrain_as_size(

 from torch import _logging

+
 _logging._init_logs()

```

torch/_appdirs.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -53,6 +53,7 @@
 import os
 import sys

+
 unicode = str

 if sys.platform.startswith("java"):
```

torch/_custom_ops.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -10,6 +10,7 @@
 )
 from torch.library import get_ctx

+
 __all__ = [
     "custom_op",
     "impl",
```

torch/_export/converter.py

Lines changed: 1 addition & 2 deletions
```diff
@@ -3,13 +3,11 @@
 import logging
 import operator
 import warnings
-
 from contextlib import contextmanager
 from typing import Any, Dict, List, Optional, Set, Tuple, Union

 import torch
 import torch.export._trace
-
 from torch.export.exported_program import ExportedProgram
 from torch.export.graph_signature import (
     ConstantArgument,
@@ -22,6 +20,7 @@
 from torch.fx import subgraph_rewriter
 from torch.onnx.utils import _create_jit_graph

+
 log = logging.getLogger(__name__)


```

torch/_export/db/examples/model_attr_mutation.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -1,6 +1,5 @@
 # mypy: allow-untyped-defs
 import torch
-
 from torch._export.db.case import SupportLevel


```

torch/_export/db/examples/optional_input.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -1,6 +1,5 @@
 # mypy: allow-untyped-defs
 import torch
-
 from torch._export.db.case import SupportLevel


```

torch/_export/db/examples/torch_sym_min.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -1,6 +1,5 @@
 # mypy: allow-untyped-defs
 import torch
-
 from torch._export.db.case import SupportLevel


```

torch/_export/non_strict_utils.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -38,6 +38,7 @@
     tree_map_with_path,
 )

+
 if TYPE_CHECKING:
     from sympy import Symbol

```

torch/_export/passes/collect_tracepoints_pass.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -2,10 +2,10 @@
 import operator

 import torch
-
 from torch.export.exported_program import ConstantArgument, TensorArgument
 from torch.fx.passes.infra.pass_base import PassBase, PassResult

+
 __all__ = ["CollectTracepointsPass"]


```

torch/_export/passes/constant_folding.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -6,6 +6,7 @@
 import torch
 import torch.utils._pytree as pytree

+
 aten = torch.ops.aten

 # We would like to split modules into two subgraphs for runtime weight updates to work correctly.
```

torch/_export/passes/lift_constants_pass.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -5,7 +5,6 @@
 import torch
 from torch._export.verifier import SpecViolationError
 from torch._guards import detect_fake_mode
-
 from torch._library.fake_class_registry import FakeScriptObject
 from torch.export.exported_program import (
     ArgumentSpec,
```

torch/_export/tools.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -8,6 +8,7 @@
 import torch.export._trace
 from torch._utils_internal import log_export_usage

+
 log = logging.getLogger(__name__)

 __all__ = ["report_exportability"]
```

torch/_export/utils.py

Lines changed: 1 addition & 2 deletions
```diff
@@ -5,13 +5,11 @@
 import math
 import operator
 import re
-
 from inspect import Parameter
 from typing import Any, Dict, Iterable, List, Optional, Tuple, Type

 import torch
 from torch._subclasses.fake_tensor import FakeTensor
-
 from torch.export import ExportedProgram
 from torch.export.exported_program import (
     _name_hoo_subgraph_placeholders,
@@ -33,6 +31,7 @@
     UnflattenFunc,
 )

+
 placeholder_prefixes = {
     InputKind.USER_INPUT: "",
     InputKind.PARAMETER: "p_",
```

torch/_functorch/_aot_autograd/autograd_cache.py

Lines changed: 1 addition & 5 deletions
```diff
@@ -8,15 +8,12 @@
 import os
 import pickle
 import shutil
-
 from dataclasses import dataclass
-
 from typing import Callable, List, Optional, TYPE_CHECKING, Union

 import torch
 from torch._dynamo.utils import counters
 from torch._functorch import config
-
 from torch._inductor.codecache import (
     _ident,
     BypassFxGraphCache,
@@ -27,7 +24,6 @@
     FxGraphHashDetails,
     write_atomic,
 )
-
 from torch._inductor.runtime.runtime_utils import cache_dir

 from .runtime_wrappers import (
@@ -39,9 +35,9 @@
     RuntimeWrapper,
     SubclassMeta,
 )
-
 from .schemas import AOTConfig, ViewAndMutationMeta  # noqa: F401

+
 if TYPE_CHECKING:
     from torch.fx.node import Node

```

torch/_functorch/_aot_autograd/collect_metadata_analysis.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -45,9 +45,9 @@
     ViewAndMutationMeta,
 )
 from .subclass_utils import create_subclass_meta
-
 from .utils import _get_autocast_states, KNOWN_TYPES, strict_zip

+
 zip = strict_zip

 log = logging.getLogger(__name__)
```

torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -12,7 +12,6 @@
 import torch.utils.dlpack
 from torch import Tensor
 from torch._dispatch.python import enable_python_dispatcher
-
 from torch._dynamo.utils import lazy_format_graph_code
 from torch._logging import getArtifactLogger, trace_structured
 from torch._subclasses.functional_tensor import FunctionalTensorMode
@@ -34,6 +33,7 @@
 )
 from .utils import root_module_when_exporting_non_strict, unlift_tokens

+
 aot_graphs_log = getArtifactLogger(__name__, "aot_graphs")


```

torch/_functorch/_aot_autograd/functional_utils.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -23,6 +23,7 @@
     transform_subclass,
 )

+
 aot_joint_log = getArtifactLogger(__name__, "aot_joint_graph")


```

torch/_functorch/_aot_autograd/input_output_analysis.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -18,6 +18,7 @@
 from torch import Tensor
 from torch._subclasses.functional_tensor import FunctionalTensor
 from torch.fx.experimental.symbolic_shapes import is_concrete_int
+
 from .. import config
 from .collect_metadata_analysis import coerce_tangent
 from .schemas import (
@@ -30,6 +31,7 @@
 )
 from .utils import strict_zip

+
 zip = strict_zip


```

torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py

Lines changed: 2 additions & 3 deletions
```diff
@@ -13,7 +13,6 @@
 import logging
 import traceback
 from contextlib import nullcontext
-
 from typing import Any, Callable, List, Optional, Sequence, Tuple

 import torch
@@ -27,6 +26,7 @@
 from torch.fx.experimental.proxy_tensor import is_sym_node
 from torch.fx.experimental.symbolic_shapes import fx_placeholder_vals
 from torch.multiprocessing.reductions import StorageWeakRef
+
 from .. import config
 from .autograd_cache import (
     AOTAutogradCache,
@@ -39,7 +39,6 @@
     aot_dispatch_base_graph,
 )
 from .logging_utils import track_graph_compiling
-
 from .runtime_wrappers import (
     AOTDedupeWrapper,
     AOTDispatchAutograd,
@@ -57,9 +56,9 @@
 )
 from .schemas import AOTConfig, MutationType, ViewAndMutationMeta
 from .subclass_utils import compute_inner_mutated_inp_indices_from_subclass_meta
-
 from .utils import _get_symint_hints, make_boxed_func, strict_zip, unlift_tokens

+
 zip = strict_zip

 log = logging.getLogger(__name__)
```
