Pre-commit: Black #4588

Closed · wants to merge 2 commits

Changes from all commits

23 changes: 15 additions & 8 deletions .pre-commit-config.yaml
@@ -1,21 +1,28 @@
 repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.0.1
-    hooks:
-      - id: check-docstring-first
-      - id: check-toml
-      - id: check-yaml
-        exclude: packaging/.*
-      - id: end-of-file-fixer
 
   - repo: https://github.com/psf/black
     rev: 21.9b0
     hooks:
       - id: black
 
+  - repo: https://github.com/omnilib/ufmt
+    rev: v1.3.0
+    hooks:
+      - id: ufmt
+        additional_dependencies:
+          - black == 21.9b0
+          - usort == 0.6.4
 
   - repo: https://gitlab.com/pycqa/flake8
     rev: 3.9.2
     hooks:
       - id: flake8
         args: [--config=setup.cfg]
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.0.1
+    hooks:
+      - id: check-docstring-first
+      - id: check-toml
+      - id: check-yaml
+        exclude: packaging/.*
+      - id: end-of-file-fixer
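
Reviewer note: the gallery edits below are mechanical reformats of the kind the pinned black 21.9b0 produces (double quotes, explicit leading zeros on floats, trailing commas in multi-line calls, blank-line rules); behavior is unchanged. A minimal sketch of checking one such line against black's Python API (black.format_str and black.Mode are public; the snippet is illustrative and not part of this PR):

import black

old = """plt.rcParams["savefig.bbox"] = 'tight'"""
new = black.format_str(old, mode=black.Mode())
print(new)  # plt.rcParams["savefig.bbox"] = "tight"
assert black.format_str(new, mode=black.Mode()) == new  # black is idempotent
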
1 change: 1 addition & 0 deletions gallery/plot_repurposing_annotations.py
@@ -161,6 +161,7 @@ def show(imgs):
 # Here is an example where we re-purpose the dataset from the
 # `PenFudan Detection Tutorial <https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html>`_.
 
+
 class SegmentationToDetectionDataset(torch.utils.data.Dataset):
     def __init__(self, root, transforms):
         self.root = root
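
The only change in this file is black's two-blank-lines rule before top-level definitions. A tiny illustration with hypothetical names (not from the gallery file):

# Black, following PEP 8, wants two blank lines before a top-level definition;
# the hunk above simply inserts the missing second blank line before the class.
def make_transforms():
    return None


class ExampleDataset:  # now separated from the function by two blank lines
    pass
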
17 changes: 8 additions & 9 deletions gallery/plot_scripted_tensor_transforms.py
@@ -33,14 +33,14 @@
 from torchvision.io import read_image
 
 
-plt.rcParams["savefig.bbox"] = 'tight'
+plt.rcParams["savefig.bbox"] = "tight"
 torch.manual_seed(1)
 
 
 def show(imgs):
     fix, axs = plt.subplots(ncols=len(imgs), squeeze=False)
     for i, img in enumerate(imgs):
-        img = T.ToPILImage()(img.to('cpu'))
+        img = T.ToPILImage()(img.to("cpu"))
         axs[0, i].imshow(np.asarray(img))
         axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
 
@@ -49,8 +49,8 @@ def show(imgs):
 # The :func:`~torchvision.io.read_image` function allows to read an image and
 # directly load it as a tensor
 
-dog1 = read_image(str(Path('assets') / 'dog1.jpg'))
-dog2 = read_image(str(Path('assets') / 'dog2.jpg'))
+dog1 = read_image(str(Path("assets") / "dog1.jpg"))
+dog2 = read_image(str(Path("assets") / "dog2.jpg"))
 show([dog1, dog2])
 
 ####################################
@@ -68,7 +68,7 @@ def show(imgs):
     T.RandomHorizontalFlip(p=0.3),
 )
 
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
+device = "cuda" if torch.cuda.is_available() else "cpu"
 dog1 = dog1.to(device)
 dog2 = dog2.to(device)
 
@@ -89,15 +89,14 @@ def show(imgs):


 class Predictor(nn.Module):
-
     def __init__(self):
         super().__init__()
         self.resnet18 = resnet18(pretrained=True, progress=False).eval()
         self.transforms = nn.Sequential(
-            T.Resize([256, ]),  # We use single int value inside a list due to torchscript type restrictions
+            T.Resize((256,)),  # We use single int value inside a list due to torchscript type restrictions
             T.CenterCrop(224),
             T.ConvertImageDtype(torch.float),
-            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
         )
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
@@ -125,7 +124,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:

 import json
 
-with open(Path('assets') / 'imagenet_class_index.json', 'r') as labels_file:
+with open(Path("assets") / "imagenet_class_index.json", "r") as labels_file:
     labels = json.load(labels_file)
 
 for i, (pred, pred_scripted) in enumerate(zip(res, res_scripted)):
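
For context while reviewing (this part of the tutorial is unchanged by the PR): the file goes on to script Predictor with TorchScript and compare eager and scripted outputs. A rough sketch of that usage, assuming dog1, dog2, and device from the hunks above are in scope:

# Sketch only; torch.stack assumes the two asset images share a shape.
predictor = Predictor().to(device)
scripted_predictor = torch.jit.script(predictor).to(device)

batch = torch.stack([dog1, dog2]).to(device)
res = predictor(batch)                    # eager forward pass
res_scripted = scripted_predictor(batch)  # scripted pass; should match res
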
17 changes: 7 additions & 10 deletions gallery/plot_transforms.py
@@ -18,8 +18,8 @@
 import torchvision.transforms as T
 
 
-plt.rcParams["savefig.bbox"] = 'tight'
-orig_img = Image.open(Path('assets') / 'astronaut.jpg')
+plt.rcParams["savefig.bbox"] = "tight"
+orig_img = Image.open(Path("assets") / "astronaut.jpg")
 # if you change the seed, make sure that the randomly-applied transforms
 # properly show that the image can be both transformed and *not* transformed!
 torch.manual_seed(0)
@@ -41,7 +41,7 @@ def plot(imgs, with_orig=True, row_title=None, **imshow_kwargs):
             ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
 
     if with_orig:
-        axs[0, 0].set(title='Original image')
+        axs[0, 0].set(title="Original image")
         axs[0, 0].title.set_size(8)
     if row_title is not None:
         for row_idx in range(num_rows):
@@ -93,7 +93,7 @@ def plot(imgs, with_orig=True, row_title=None, **imshow_kwargs):
 # (see also :func:`~torchvision.transforms.functional.to_grayscale`)
 # converts an image to grayscale
 gray_img = T.Grayscale()(orig_img)
-plot([gray_img], cmap='gray')
+plot([gray_img], cmap="gray")
 
 ####################################
 # Random transforms
@@ -105,7 +105,7 @@
 # ~~~~~~~~~~~
 # The :class:`~torchvision.transforms.ColorJitter` transform
 # randomly changes the brightness, saturation, and other properties of an image.
-jitter = T.ColorJitter(brightness=.5, hue=.3)
+jitter = T.ColorJitter(brightness=0.5, hue=0.3)
 jitted_imgs = [jitter(orig_img) for _ in range(4)]
 plot(jitted_imgs)
 
@@ -240,11 +240,8 @@ def plot(imgs, with_orig=True, row_title=None, **imshow_kwargs):
 # See :class:`~torchvision.transforms.AutoAugmentPolicy` for the available policies.
 policies = [T.AutoAugmentPolicy.CIFAR10, T.AutoAugmentPolicy.IMAGENET, T.AutoAugmentPolicy.SVHN]
 augmenters = [T.AutoAugment(policy) for policy in policies]
-imgs = [
-    [augmenter(orig_img) for _ in range(4)]
-    for augmenter in augmenters
-]
-row_title = [str(policy).split('.')[-1] for policy in policies]
+imgs = [[augmenter(orig_img) for _ in range(4)] for augmenter in augmenters]
+row_title = [str(policy).split(".")[-1] for policy in policies]
 plot(imgs, row_title=row_title)
 
 ####################################
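
The AutoAugment hunk is pure layout: the nested comprehension collapses onto one line. A quick, self-contained check that the two spellings are equivalent (stand-in callables instead of T.AutoAugment, purely for illustration):

augmenters = [str.upper, str.lower]  # stand-ins for the T.AutoAugment objects
orig_img = "Dog"
imgs_multi = [
    [augmenter(orig_img) for _ in range(4)]
    for augmenter in augmenters
]
imgs_single = [[augmenter(orig_img) for _ in range(4)] for augmenter in augmenters]
assert imgs_multi == imgs_single
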
76 changes: 38 additions & 38 deletions gallery/plot_video_api.py
@@ -35,9 +35,7 @@

 # Download the sample video
 download_url(
-    "https://github.com/pytorch/vision/blob/main/test/assets/videos/WUzgd7C1pWA.mp4?raw=true",
-    ".",
-    "WUzgd7C1pWA.mp4"
+    "https://github.com/pytorch/vision/blob/main/test/assets/videos/WUzgd7C1pWA.mp4?raw=true", ".", "WUzgd7C1pWA.mp4"
 )
 video_path = "./WUzgd7C1pWA.mp4"
 
@@ -75,12 +73,12 @@
 frames = []  # we are going to save the frames here.
 ptss = []  # pts is a presentation timestamp in seconds (float) of each frame
 for frame in video:
-    frames.append(frame['data'])
-    ptss.append(frame['pts'])
+    frames.append(frame["data"])
+    ptss.append(frame["pts"])
 
 print("PTS for first five frames ", ptss[:5])
 print("Total number of frames: ", len(frames))
-approx_nf = metadata['audio']['duration'][0] * metadata['audio']['framerate'][0]
+approx_nf = metadata["audio"]["duration"][0] * metadata["audio"]["framerate"][0]
 print("Approx total number of datapoints we can expect: ", approx_nf)
 print("Read data size: ", frames[0].size(0) * len(frames))
 
@@ -96,6 +94,7 @@


 import itertools
+
 video.set_current_stream("video")
 
 frames = []  # we are going to save the frames here.
@@ -116,11 +115,11 @@
 frames = []  # we are going to save the frames here.
 video = video.seek(2)
 
-for frame in itertools.takewhile(lambda x: x['pts'] <= 5, video):
-    frames.append(frame['data'])
+for frame in itertools.takewhile(lambda x: x["pts"] <= 5, video):
+    frames.append(frame["data"])
 
 print("Total number of frames: ", len(frames))
-approx_nf = (5 - 2) * video.get_metadata()['video']['fps'][0]
+approx_nf = (5 - 2) * video.get_metadata()["video"]["fps"][0]
 print("We can expect approx: ", approx_nf)
 print("Tensor size: ", frames[0].size())
 
@@ -136,18 +135,17 @@ def example_read_video(video_object, start=0, end=None, read_video=True, read_audio=True):
         end = float("inf")
     if end < start:
         raise ValueError(
-            "end time should be larger than start time, got "
-            "start time={} and end time={}".format(start, end)
+            "end time should be larger than start time, got " "start time={} and end time={}".format(start, end)
         )
 
     video_frames = torch.empty(0)
     video_pts = []
     if read_video:
         video_object.set_current_stream("video")
         frames = []
-        for frame in itertools.takewhile(lambda x: x['pts'] <= end, video_object.seek(start)):
-            frames.append(frame['data'])
-            video_pts.append(frame['pts'])
+        for frame in itertools.takewhile(lambda x: x["pts"] <= end, video_object.seek(start)):
+            frames.append(frame["data"])
+            video_pts.append(frame["pts"])
         if len(frames) > 0:
             video_frames = torch.stack(frames, 0)
 
@@ -156,9 +154,9 @@ def example_read_video(video_object, start=0, end=None, read_video=True, read_audio=True):
     if read_audio:
         video_object.set_current_stream("audio")
         frames = []
-        for frame in itertools.takewhile(lambda x: x['pts'] <= end, video_object.seek(start)):
-            frames.append(frame['data'])
-            video_pts.append(frame['pts'])
+        for frame in itertools.takewhile(lambda x: x["pts"] <= end, video_object.seek(start)):
+            frames.append(frame["data"])
+            video_pts.append(frame["pts"])
         if len(frames) > 0:
             audio_frames = torch.cat(frames, 0)
 
@@ -179,36 +177,39 @@ def example_read_video(video_object, start=0, end=None, read_video=True, read_audio=True):
 ####################################
 # Make sample dataset
 import os
+
 os.makedirs("./dataset", exist_ok=True)
 os.makedirs("./dataset/1", exist_ok=True)
 os.makedirs("./dataset/2", exist_ok=True)
 
 ####################################
 # Download the videos
 from torchvision.datasets.utils import download_url
+
 download_url(
     "https://github.com/pytorch/vision/blob/main/test/assets/videos/WUzgd7C1pWA.mp4?raw=true",
-    "./dataset/1", "WUzgd7C1pWA.mp4"
+    "./dataset/1",
+    "WUzgd7C1pWA.mp4",
 )
 download_url(
     "https://github.com/pytorch/vision/blob/main/test/assets/videos/RATRACE_wave_f_nm_np1_fr_goo_37.avi?raw=true",
     "./dataset/1",
-    "RATRACE_wave_f_nm_np1_fr_goo_37.avi"
+    "RATRACE_wave_f_nm_np1_fr_goo_37.avi",
 )
 download_url(
     "https://github.com/pytorch/vision/blob/main/test/assets/videos/SOX5yA1l24A.mp4?raw=true",
     "./dataset/2",
-    "SOX5yA1l24A.mp4"
+    "SOX5yA1l24A.mp4",
 )
 download_url(
     "https://github.com/pytorch/vision/blob/main/test/assets/videos/v_SoccerJuggling_g23_c01.avi?raw=true",
     "./dataset/2",
-    "v_SoccerJuggling_g23_c01.avi"
+    "v_SoccerJuggling_g23_c01.avi",
 )
 download_url(
     "https://github.com/pytorch/vision/blob/main/test/assets/videos/v_SoccerJuggling_g24_c01.avi?raw=true",
     "./dataset/2",
-    "v_SoccerJuggling_g24_c01.avi"
+    "v_SoccerJuggling_g24_c01.avi",
 )
 
 ####################################
Expand All @@ -231,6 +232,7 @@ def get_samples(root, extensions=(".mp4", ".avi")):
     _, class_to_idx = _find_classes(root)
     return make_dataset(root, class_to_idx, extensions=extensions)
 
+
 ####################################
 # We are going to define the dataset and some basic arguments.
 # We assume the structure of the FolderDataset, and add the following parameters:
@@ -269,23 +271,19 @@ def __iter__(self):
             video_frames = []  # video frame buffer
 
             # Seek and return frames
-            max_seek = metadata["video"]['duration'][0] - (self.clip_len / metadata["video"]['fps'][0])
-            start = random.uniform(0., max_seek)
+            max_seek = metadata["video"]["duration"][0] - (self.clip_len / metadata["video"]["fps"][0])
+            start = random.uniform(0.0, max_seek)
             for frame in itertools.islice(vid.seek(start), self.clip_len):
-                video_frames.append(self.frame_transform(frame['data']))
-                current_pts = frame['pts']
+                video_frames.append(self.frame_transform(frame["data"]))
+                current_pts = frame["pts"]
             # Stack it into a tensor
             video = torch.stack(video_frames, 0)
             if self.video_transform:
                 video = self.video_transform(video)
-            output = {
-                'path': path,
-                'video': video,
-                'target': target,
-                'start': start,
-                'end': current_pts}
+            output = {"path": path, "video": video, "target": target, "start": start, "end": current_pts}
             yield output
 
+
 ####################################
 # Given a path of videos in a folder structure, i.e:
 #
@@ -310,14 +308,15 @@

 ####################################
 from torch.utils.data import DataLoader
+
 loader = DataLoader(dataset, batch_size=12)
-data = {"video": [], 'start': [], 'end': [], 'tensorsize': []}
+data = {"video": [], "start": [], "end": [], "tensorsize": []}
 for batch in loader:
-    for i in range(len(batch['path'])):
-        data['video'].append(batch['path'][i])
-        data['start'].append(batch['start'][i].item())
-        data['end'].append(batch['end'][i].item())
-        data['tensorsize'].append(batch['video'][i].size())
+    for i in range(len(batch["path"])):
+        data["video"].append(batch["path"][i])
+        data["start"].append(batch["start"][i].item())
+        data["end"].append(batch["end"][i].item())
+        data["tensorsize"].append(batch["video"][i].size())
 print(data)
 
 ####################################
@@ -337,5 +336,6 @@
 # Cleanup the video and dataset:
 import os
 import shutil
+
 os.remove("./WUzgd7C1pWA.mp4")
 shutil.rmtree("./dataset")