Skip to content

upgrade usort to 1.0.2 and black to 22.3.0 #5106

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 8 commits into from
Jul 22, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@ repos:
hooks:
- id: ufmt
additional_dependencies:
- black == 21.9b0
- usort == 0.6.4
- black == 22.3.0
- usort == 1.0.2

- repo: https://gitlab.com/pycqa/flake8
rev: 3.9.2
Expand Down
60 changes: 27 additions & 33 deletions hubconf.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@

from torchvision.models import get_weight
from torchvision.models.alexnet import alexnet
from torchvision.models.convnext import convnext_tiny, convnext_small, convnext_base, convnext_large
from torchvision.models.densenet import densenet121, densenet169, densenet201, densenet161
from torchvision.models.convnext import convnext_base, convnext_large, convnext_small, convnext_tiny
from torchvision.models.densenet import densenet121, densenet161, densenet169, densenet201
from torchvision.models.efficientnet import (
efficientnet_b0,
efficientnet_b1,
Expand All @@ -14,9 +14,9 @@
efficientnet_b5,
efficientnet_b6,
efficientnet_b7,
efficientnet_v2_s,
efficientnet_v2_m,
efficientnet_v2_l,
efficientnet_v2_m,
efficientnet_v2_s,
)
from torchvision.models.googlenet import googlenet
from torchvision.models.inception import inception_v3
Expand All @@ -25,40 +25,40 @@
from torchvision.models.mobilenetv3 import mobilenet_v3_large, mobilenet_v3_small
from torchvision.models.optical_flow import raft_large, raft_small
from torchvision.models.regnet import (
regnet_y_400mf,
regnet_y_800mf,
regnet_y_1_6gf,
regnet_y_3_2gf,
regnet_y_8gf,
regnet_y_16gf,
regnet_y_32gf,
regnet_y_128gf,
regnet_x_400mf,
regnet_x_800mf,
regnet_x_16gf,
regnet_x_1_6gf,
regnet_x_32gf,
regnet_x_3_2gf,
regnet_x_400mf,
regnet_x_800mf,
regnet_x_8gf,
regnet_x_16gf,
regnet_x_32gf,
regnet_y_128gf,
regnet_y_16gf,
regnet_y_1_6gf,
regnet_y_32gf,
regnet_y_3_2gf,
regnet_y_400mf,
regnet_y_800mf,
regnet_y_8gf,
)
from torchvision.models.resnet import (
resnet101,
resnet152,
resnet18,
resnet34,
resnet50,
resnet101,
resnet152,
resnext50_32x4d,
resnext101_32x8d,
resnext101_64x4d,
wide_resnet50_2,
resnext50_32x4d,
wide_resnet101_2,
wide_resnet50_2,
)
from torchvision.models.segmentation import (
fcn_resnet50,
fcn_resnet101,
deeplabv3_resnet50,
deeplabv3_resnet101,
deeplabv3_mobilenet_v3_large,
deeplabv3_resnet101,
deeplabv3_resnet50,
fcn_resnet101,
fcn_resnet50,
lraspp_mobilenet_v3_large,
)
from torchvision.models.shufflenetv2 import (
Expand All @@ -68,12 +68,6 @@
shufflenet_v2_x2_0,
)
from torchvision.models.squeezenet import squeezenet1_0, squeezenet1_1
from torchvision.models.swin_transformer import swin_t, swin_s, swin_b
from torchvision.models.vgg import vgg11, vgg13, vgg16, vgg19, vgg11_bn, vgg13_bn, vgg16_bn, vgg19_bn
from torchvision.models.vision_transformer import (
vit_b_16,
vit_b_32,
vit_l_16,
vit_l_32,
vit_h_14,
)
from torchvision.models.swin_transformer import swin_b, swin_s, swin_t
from torchvision.models.vgg import vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.vision_transformer import vit_b_16, vit_b_32, vit_h_14, vit_l_16, vit_l_32
2 changes: 1 addition & 1 deletion references/classification/train_quantization.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
import torchvision
import utils
from torch import nn
from train import train_one_epoch, evaluate, load_data
from train import evaluate, load_data, train_one_epoch


def main(args):
Expand Down
2 changes: 1 addition & 1 deletion references/detection/group_by_aspect_ratio.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import copy
import math
from collections import defaultdict
from itertools import repeat, chain
from itertools import chain, repeat

import numpy as np
import torch
Expand Down
4 changes: 2 additions & 2 deletions references/detection/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@
import torchvision.models.detection.mask_rcnn
import utils
from coco_utils import get_coco, get_coco_kp
from engine import train_one_epoch, evaluate
from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
from engine import evaluate, train_one_epoch
from group_by_aspect_ratio import create_aspect_ratio_groups, GroupedBatchSampler
from torchvision.transforms import InterpolationMode
from transforms import SimpleCopyPaste

Expand Down
5 changes: 2 additions & 3 deletions references/detection/transforms.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,10 @@
from typing import List, Tuple, Dict, Optional, Union
from typing import Dict, List, Optional, Tuple, Union

import torch
import torchvision
from torch import nn, Tensor
from torchvision import ops
from torchvision.transforms import functional as F
from torchvision.transforms import transforms as T, InterpolationMode
from torchvision.transforms import functional as F, InterpolationMode, transforms as T


def _flip_coco_person_keypoints(kps, width):
Expand Down
4 changes: 2 additions & 2 deletions references/optical_flow/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@
import torch
import torchvision.models.optical_flow
import utils
from presets import OpticalFlowPresetTrain, OpticalFlowPresetEval
from torchvision.datasets import KittiFlow, FlyingChairs, FlyingThings3D, Sintel, HD1K
from presets import OpticalFlowPresetEval, OpticalFlowPresetTrain
from torchvision.datasets import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel


def get_train_dataset(stage, dataset_root):
Expand Down
7 changes: 3 additions & 4 deletions references/optical_flow/utils.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
import datetime
import os
import time
from collections import defaultdict
from collections import deque
from collections import defaultdict, deque

import torch
import torch.distributed as dist
Expand Down Expand Up @@ -158,7 +157,7 @@ def log_every(self, iterable, print_freq=5, header=None):
def compute_metrics(flow_pred, flow_gt, valid_flow_mask=None):

epe = ((flow_pred - flow_gt) ** 2).sum(dim=1).sqrt()
flow_norm = (flow_gt ** 2).sum(dim=1).sqrt()
flow_norm = (flow_gt**2).sum(dim=1).sqrt()

if valid_flow_mask is not None:
epe = epe[valid_flow_mask]
Expand All @@ -183,7 +182,7 @@ def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400)
raise ValueError(f"Gamma should be < 1, got {gamma}.")

    # exclude invalid pixels and extremely large displacements
flow_norm = torch.sum(flow_gt ** 2, dim=1).sqrt()
flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()
valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)

valid_flow_mask = valid_flow_mask[:, None, :, :]
Expand Down
2 changes: 1 addition & 1 deletion references/segmentation/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ def update(self, a, b):
with torch.inference_mode():
k = (a >= 0) & (a < n)
inds = n * a[k].to(torch.int64) + b[k]
self.mat += torch.bincount(inds, minlength=n ** 2).reshape(n, n)
self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)

def reset(self):
self.mat.zero_()
Expand Down
6 changes: 3 additions & 3 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,9 @@
import sys

import torch
from pkg_resources import parse_version, get_distribution, DistributionNotFound
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from pkg_resources import DistributionNotFound, get_distribution, parse_version
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDA_HOME, CUDAExtension


def read(*names, **kwargs):
Expand Down
4 changes: 2 additions & 2 deletions test/builtin_dataset_mocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,12 @@
import unittest.mock
import warnings
import xml.etree.ElementTree as ET
from collections import defaultdict, Counter
from collections import Counter, defaultdict

import numpy as np
import pytest
import torch
from datasets_utils import make_zip, make_tar, create_image_folder, create_image_file, combinations_grid
from datasets_utils import combinations_grid, create_image_file, create_image_folder, make_tar, make_zip
from torch.nn.functional import one_hot
from torch.testing import make_tensor as _make_tensor
from torchvision.prototype import datasets
Expand Down
2 changes: 1 addition & 1 deletion test/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import numpy as np
import pytest
import torch
from common_utils import IN_CIRCLE_CI, CIRCLECI_GPU_NO_CUDA_MSG, IN_FBCODE, IN_RE_WORKER, CUDA_NOT_AVAILABLE_MSG
from common_utils import CIRCLECI_GPU_NO_CUDA_MSG, CUDA_NOT_AVAILABLE_MSG, IN_CIRCLE_CI, IN_FBCODE, IN_RE_WORKER


def pytest_configure(config):
Expand Down
2 changes: 1 addition & 1 deletion test/datasets_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
import torch
import torchvision.datasets
import torchvision.io
from common_utils import get_tmp_dir, disable_console_output
from common_utils import disable_console_output, get_tmp_dir


__all__ = [
Expand Down
6 changes: 3 additions & 3 deletions test/test_datasets_download.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,15 +9,15 @@
from os import path
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse
from urllib.request import urlopen, Request
from urllib.request import Request, urlopen

import pytest
from torchvision import datasets
from torchvision.datasets.utils import (
download_url,
_get_redirect_url,
check_integrity,
download_file_from_google_drive,
_get_redirect_url,
download_url,
USER_AGENT,
)

Expand Down
8 changes: 2 additions & 6 deletions test/test_datasets_samplers.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,8 @@
import pytest
import torch
from common_utils import get_list_of_videos, assert_equal
from common_utils import assert_equal, get_list_of_videos
from torchvision import io
from torchvision.datasets.samplers import (
DistributedSampler,
RandomClipSampler,
UniformClipSampler,
)
from torchvision.datasets.samplers import DistributedSampler, RandomClipSampler, UniformClipSampler
from torchvision.datasets.video_utils import VideoClips


Expand Down
4 changes: 2 additions & 2 deletions test/test_datasets_video_utils.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import pytest
import torch
from common_utils import get_list_of_videos, assert_equal
from common_utils import assert_equal, get_list_of_videos
from torchvision import io
from torchvision.datasets.video_utils import VideoClips, unfold
from torchvision.datasets.video_utils import unfold, VideoClips


class TestVideo:
Expand Down
2 changes: 1 addition & 1 deletion test/test_extended_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import test_models as TM
import torch
from torchvision import models
from torchvision.models._api import WeightsEnum, Weights
from torchvision.models._api import Weights, WeightsEnum
from torchvision.models._utils import handle_legacy_interface


Expand Down
8 changes: 4 additions & 4 deletions test/test_functional_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,14 +14,14 @@
import torchvision.transforms.functional_pil as F_pil
import torchvision.transforms.functional_tensor as F_t
from common_utils import (
cpu_and_gpu,
needs_cuda,
_assert_approx_equal_tensor_to_pil,
_assert_equal_tensor_to_pil,
_create_data,
_create_data_batch,
_assert_equal_tensor_to_pil,
_assert_approx_equal_tensor_to_pil,
_test_fn_on_batch,
assert_equal,
cpu_and_gpu,
needs_cuda,
)
from torchvision.transforms import InterpolationMode

Expand Down
20 changes: 10 additions & 10 deletions test/test_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,21 +8,21 @@
import pytest
import torch
import torchvision.transforms.functional as F
from common_utils import needs_cuda, assert_equal
from PIL import Image, __version__ as PILLOW_VERSION
from common_utils import assert_equal, needs_cuda
from PIL import __version__ as PILLOW_VERSION, Image
from torchvision.io.image import (
decode_png,
_read_png_16,
decode_image,
decode_jpeg,
decode_png,
encode_jpeg,
write_jpeg,
decode_image,
read_file,
encode_png,
write_png,
write_file,
ImageReadMode,
read_file,
read_image,
_read_png_16,
write_file,
write_jpeg,
write_png,
)

IMAGE_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets")
Expand Down Expand Up @@ -168,7 +168,7 @@ def test_decode_png(img_path, pil_mode, mode):
img_lpng = _read_png_16(img_path, mode=mode)
assert img_lpng.dtype == torch.int32
# PIL converts 16 bits pngs in uint8
img_lpng = torch.round(img_lpng / (2 ** 16 - 1) * 255).to(torch.uint8)
img_lpng = torch.round(img_lpng / (2**16 - 1) * 255).to(torch.uint8)
else:
data = read_file(img_path)
img_lpng = decode_image(data, mode=mode)
Expand Down
2 changes: 1 addition & 1 deletion test/test_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
import torch.fx
import torch.nn as nn
from _utils_internal import get_relative_path
from common_utils import map_nested_tensor_object, freeze_rng_state, set_rng_seed, cpu_and_gpu, needs_cuda
from common_utils import cpu_and_gpu, freeze_rng_state, map_nested_tensor_object, needs_cuda, set_rng_seed
from torchvision import models

ACCEPT = os.getenv("EXPECTTEST_ACCEPT", "0") == "1"
Expand Down
4 changes: 2 additions & 2 deletions test/test_models_detection_negative_samples.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from common_utils import assert_equal
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
from torchvision.models.detection.rpn import AnchorGenerator, RegionProposalNetwork, RPNHead
from torchvision.ops import MultiScaleRoIAlign


Expand Down Expand Up @@ -60,7 +60,7 @@ def test_assign_targets_to_proposals(self):

resolution = box_roi_pool.output_size[0]
representation_size = 1024
box_head = TwoMLPHead(4 * resolution ** 2, representation_size)
box_head = TwoMLPHead(4 * resolution**2, representation_size)

representation_size = 1024
box_predictor = FastRCNNPredictor(representation_size, 2)
Expand Down
Loading