diff --git a/README.md b/README.md index 092b52bb4..ca4d616c0 100755 --- a/README.md +++ b/README.md @@ -1,3 +1,24 @@ +## StyleGAN2-ADA — `pip install` version of Official PyTorch implementation + +I have modified the official PyTorch implementation so that you can `pip install` this repository as a dependency and reuse the classes and functions here. + +### Requirements + +* Linux and Windows are supported, but we recommend Linux for performance and compatibility reasons. +* 1–8 high-end NVIDIA GPUs with at least 12 GB of memory. We have done all testing and development using NVIDIA DGX-1 with 8 Tesla V100 GPUs. +* 64-bit Python 3.7 and PyTorch 1.7.1. See [https://pytorch.org/](https://pytorch.org/) for PyTorch install instructions. +* CUDA toolkit 11.0 or later. Use at least version 11.1 if running on RTX 3090. (Why is a separate CUDA toolkit installation required? See comments in [#2](https://github.com/NVlabs/stylegan2-ada-pytorch/issues/2#issuecomment-779457121).) + +### Installation + +From repo's root directory `stylegan2-ada-pytorch`, run `python -m pip install .` + +### Original official implementation + +Available [here](https://github.com/NVlabs/stylegan2-ada-pytorch), the original `README.md` is copied below. + +*** + ## StyleGAN2-ADA — Official PyTorch implementation ![Teaser image](./docs/stylegan2-ada-teaser-1024x252.png) @@ -151,7 +172,7 @@ w = G.mapping(z, c, truncation_psi=0.5, truncation_cutoff=8) img = G.synthesis(w, noise_mode='const', force_fp32=True) ``` -Please refer to [`generate.py`](./generate.py), [`style_mixing.py`](./style_mixing.py), and [`projector.py`](./projector.py) for further examples. +Please refer to [`generate.py`](stylegan2_ada_pytorch/generate.py), [`style_mixing.py`](stylegan2_ada_pytorch/style_mixing.py), and [`projector.py`](stylegan2_ada_pytorch/projector.py) for further examples. 
## Preparing datasets diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..0ad39d0b7 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,6 @@ +[build-system] +requires = [ + "setuptools>=42", + "wheel" +] +build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000..b91d8783d --- /dev/null +++ b/setup.cfg @@ -0,0 +1,34 @@ +[metadata] +name = stylegan2-ada-pytorch +version = 1.0.0 +description = StyleGAN2-ADA - Official PyTorch implementation +long_description = file: README.md +long_description_content_type = text/markdown +url = https://github.com/NVlabs/stylegan2-ada-pytorch +project_urls = + Bug Tracker = https://github.com/NVlabs/stylegan2-ada-pytorch/issues +classifiers = + Programming Language :: Python :: 3 + License :: Other/Proprietary License + Operating System :: OS Independent + +[options] +package_dir = + = . +packages = find: +python_requires = >=3.6 +install_requires = + torch >=1.7.0 + click + requests + tqdm + pyspng + ninja + imageio-ffmpeg ==0.4.3 + numpy + scipy + Pillow + psutil + +[options.packages.find] +where = .
diff --git a/stylegan2_ada_pytorch/__init__.py b/stylegan2_ada_pytorch/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/calc_metrics.py b/stylegan2_ada_pytorch/calc_metrics.py old mode 100755 new mode 100644 similarity index 95% rename from calc_metrics.py rename to stylegan2_ada_pytorch/calc_metrics.py index 03e828195..b56ee8138 --- a/calc_metrics.py +++ b/stylegan2_ada_pytorch/calc_metrics.py @@ -14,14 +14,12 @@ import tempfile import copy import torch -import dnnlib -import legacy -from metrics import metric_main -from metrics import metric_utils -from torch_utils import training_stats -from torch_utils import custom_ops -from torch_utils import misc +from stylegan2_ada_pytorch import legacy, dnnlib +from stylegan2_ada_pytorch.metrics import metric_main, metric_utils +from stylegan2_ada_pytorch.torch_utils import training_stats +from stylegan2_ada_pytorch.torch_utils import custom_ops, misc + #---------------------------------------------------------------------------- @@ -61,7 +59,7 @@ def subprocess_fn(rank, args, temp_dir): print(f'Calculating {metric}...') progress = metric_utils.ProgressMonitor(verbose=args.verbose) result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs, - num_gpus=args.num_gpus, rank=rank, device=device, progress=progress) + num_gpus=args.num_gpus, rank=rank, device=device, progress=progress) if rank == 0: metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl) if rank == 0 and args.verbose: diff --git a/dataset_tool.py b/stylegan2_ada_pytorch/dataset_tool.py old mode 100755 new mode 100644 similarity index 100% rename from dataset_tool.py rename to stylegan2_ada_pytorch/dataset_tool.py diff --git a/dnnlib/__init__.py b/stylegan2_ada_pytorch/dnnlib/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from dnnlib/__init__.py rename to stylegan2_ada_pytorch/dnnlib/__init__.py diff --git a/dnnlib/util.py 
b/stylegan2_ada_pytorch/dnnlib/util.py old mode 100755 new mode 100644 similarity index 100% rename from dnnlib/util.py rename to stylegan2_ada_pytorch/dnnlib/util.py diff --git a/generate.py b/stylegan2_ada_pytorch/generate.py old mode 100755 new mode 100644 similarity index 99% rename from generate.py rename to stylegan2_ada_pytorch/generate.py index f7f961931..d992544a2 --- a/generate.py +++ b/stylegan2_ada_pytorch/generate.py @@ -13,12 +13,12 @@ from typing import List, Optional import click -import dnnlib import numpy as np import PIL.Image import torch -import legacy +from stylegan2_ada_pytorch import legacy, dnnlib + #---------------------------------------------------------------------------- diff --git a/legacy.py b/stylegan2_ada_pytorch/legacy.py old mode 100755 new mode 100644 similarity index 98% rename from legacy.py rename to stylegan2_ada_pytorch/legacy.py index 9387d79f2..4bf64784a --- a/legacy.py +++ b/stylegan2_ada_pytorch/legacy.py @@ -12,8 +12,9 @@ import copy import numpy as np import torch -import dnnlib -from torch_utils import misc +from stylegan2_ada_pytorch import dnnlib +from stylegan2_ada_pytorch.torch_utils import misc + #---------------------------------------------------------------------------- @@ -165,7 +166,7 @@ def kwarg(tf_name, default=None, none=None): #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') # Convert params. - from training import networks + from stylegan2_ada_pytorch.training import networks G = networks.Generator(**kwargs).eval().requires_grad_(False) # pylint: disable=unnecessary-lambda _populate_module_params(G, @@ -262,7 +263,7 @@ def kwarg(tf_name, default=None): #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') # Convert params. 
- from training import networks + from stylegan2_ada_pytorch.training import networks D = networks.Discriminator(**kwargs).eval().requires_grad_(False) # pylint: disable=unnecessary-lambda _populate_module_params(D, diff --git a/metrics/__init__.py b/stylegan2_ada_pytorch/metrics/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from metrics/__init__.py rename to stylegan2_ada_pytorch/metrics/__init__.py diff --git a/metrics/frechet_inception_distance.py b/stylegan2_ada_pytorch/metrics/frechet_inception_distance.py old mode 100755 new mode 100644 similarity index 100% rename from metrics/frechet_inception_distance.py rename to stylegan2_ada_pytorch/metrics/frechet_inception_distance.py diff --git a/metrics/inception_score.py b/stylegan2_ada_pytorch/metrics/inception_score.py old mode 100755 new mode 100644 similarity index 100% rename from metrics/inception_score.py rename to stylegan2_ada_pytorch/metrics/inception_score.py diff --git a/metrics/kernel_inception_distance.py b/stylegan2_ada_pytorch/metrics/kernel_inception_distance.py old mode 100755 new mode 100644 similarity index 100% rename from metrics/kernel_inception_distance.py rename to stylegan2_ada_pytorch/metrics/kernel_inception_distance.py diff --git a/metrics/metric_main.py b/stylegan2_ada_pytorch/metrics/metric_main.py old mode 100755 new mode 100644 similarity index 99% rename from metrics/metric_main.py rename to stylegan2_ada_pytorch/metrics/metric_main.py index 738804a6f..8c11c3208 --- a/metrics/metric_main.py +++ b/stylegan2_ada_pytorch/metrics/metric_main.py @@ -10,7 +10,7 @@ import time import json import torch -import dnnlib +from .. import dnnlib from . import metric_utils from . 
import frechet_inception_distance diff --git a/metrics/metric_utils.py b/stylegan2_ada_pytorch/metrics/metric_utils.py old mode 100755 new mode 100644 similarity index 99% rename from metrics/metric_utils.py rename to stylegan2_ada_pytorch/metrics/metric_utils.py index 16de1eae3..1f6f72933 --- a/metrics/metric_utils.py +++ b/stylegan2_ada_pytorch/metrics/metric_utils.py @@ -14,7 +14,8 @@ import uuid import numpy as np import torch -import dnnlib +from stylegan2_ada_pytorch import dnnlib + #---------------------------------------------------------------------------- @@ -156,7 +157,7 @@ def update(self, cur_items): total_time = cur_time - self.start_time time_per_item = (cur_time - self.batch_time) / max(cur_items - self.batch_items, 1) if (self.verbose) and (self.tag is not None): - print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item*1e3:.2f}') + print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item * 1e3:.2f}') self.batch_time = cur_time self.batch_items = cur_items diff --git a/metrics/perceptual_path_length.py b/stylegan2_ada_pytorch/metrics/perceptual_path_length.py old mode 100755 new mode 100644 similarity index 99% rename from metrics/perceptual_path_length.py rename to stylegan2_ada_pytorch/metrics/perceptual_path_length.py index d070f45a0..0b1131900 --- a/metrics/perceptual_path_length.py +++ b/stylegan2_ada_pytorch/metrics/perceptual_path_length.py @@ -14,7 +14,7 @@ import copy import numpy as np import torch -import dnnlib +from .. import dnnlib from . 
import metric_utils #---------------------------------------------------------------------------- diff --git a/metrics/precision_recall.py b/stylegan2_ada_pytorch/metrics/precision_recall.py old mode 100755 new mode 100644 similarity index 100% rename from metrics/precision_recall.py rename to stylegan2_ada_pytorch/metrics/precision_recall.py diff --git a/projector.py b/stylegan2_ada_pytorch/projector.py old mode 100755 new mode 100644 similarity index 99% rename from projector.py rename to stylegan2_ada_pytorch/projector.py index 36041a086..5718dc7f2 --- a/projector.py +++ b/stylegan2_ada_pytorch/projector.py @@ -19,8 +19,8 @@ import torch import torch.nn.functional as F -import dnnlib -import legacy +from stylegan2_ada_pytorch import legacy, dnnlib + def project( G, diff --git a/style_mixing.py b/stylegan2_ada_pytorch/style_mixing.py old mode 100755 new mode 100644 similarity index 98% rename from style_mixing.py rename to stylegan2_ada_pytorch/style_mixing.py index c47bebbc4..507dd2456 --- a/style_mixing.py +++ b/stylegan2_ada_pytorch/style_mixing.py @@ -13,12 +13,12 @@ from typing import List import click -import dnnlib import numpy as np import PIL.Image import torch -import legacy +from stylegan2_ada_pytorch import legacy, dnnlib + #---------------------------------------------------------------------------- diff --git a/torch_utils/__init__.py b/stylegan2_ada_pytorch/torch_utils/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/__init__.py rename to stylegan2_ada_pytorch/torch_utils/__init__.py diff --git a/torch_utils/custom_ops.py b/stylegan2_ada_pytorch/torch_utils/custom_ops.py old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/custom_ops.py rename to stylegan2_ada_pytorch/torch_utils/custom_ops.py diff --git a/torch_utils/misc.py b/stylegan2_ada_pytorch/torch_utils/misc.py old mode 100755 new mode 100644 similarity index 99% rename from torch_utils/misc.py rename to 
stylegan2_ada_pytorch/torch_utils/misc.py index 7829f4d9f..fb27870de --- a/torch_utils/misc.py +++ b/stylegan2_ada_pytorch/torch_utils/misc.py @@ -11,7 +11,7 @@ import numpy as np import torch import warnings -import dnnlib +from stylegan2_ada_pytorch import dnnlib #---------------------------------------------------------------------------- # Cached construction of constant tensors. Avoids CPU=>GPU copy when the diff --git a/torch_utils/ops/__init__.py b/stylegan2_ada_pytorch/torch_utils/ops/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/__init__.py rename to stylegan2_ada_pytorch/torch_utils/ops/__init__.py diff --git a/torch_utils/ops/bias_act.cpp b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.cpp old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/bias_act.cpp rename to stylegan2_ada_pytorch/torch_utils/ops/bias_act.cpp diff --git a/torch_utils/ops/bias_act.cu b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.cu old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/bias_act.cu rename to stylegan2_ada_pytorch/torch_utils/ops/bias_act.cu diff --git a/torch_utils/ops/bias_act.h b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.h old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/bias_act.h rename to stylegan2_ada_pytorch/torch_utils/ops/bias_act.h diff --git a/torch_utils/ops/bias_act.py b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.py old mode 100755 new mode 100644 similarity index 87% rename from torch_utils/ops/bias_act.py rename to stylegan2_ada_pytorch/torch_utils/ops/bias_act.py index 4bcb409a8..9655f32ba --- a/torch_utils/ops/bias_act.py +++ b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.py @@ -12,7 +12,7 @@ import warnings import numpy as np import torch -import dnnlib +from stylegan2_ada_pytorch import dnnlib import traceback from .. 
import custom_ops @@ -21,15 +21,15 @@ #---------------------------------------------------------------------------- activation_funcs = { - 'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False), - 'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False), - 'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False), - 'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True), - 'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True), - 'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True), - 'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True), - 'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True), - 'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True), + 'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False), + 'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False), + 'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False), + 'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True), + 'sigmoid': 
dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True), + 'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True), + 'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True), + 'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True), + 'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True), } #---------------------------------------------------------------------------- diff --git a/torch_utils/ops/conv2d_gradfix.py b/stylegan2_ada_pytorch/torch_utils/ops/conv2d_gradfix.py old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/conv2d_gradfix.py rename to stylegan2_ada_pytorch/torch_utils/ops/conv2d_gradfix.py diff --git a/torch_utils/ops/conv2d_resample.py b/stylegan2_ada_pytorch/torch_utils/ops/conv2d_resample.py old mode 100755 new mode 100644 similarity index 93% rename from torch_utils/ops/conv2d_resample.py rename to stylegan2_ada_pytorch/torch_utils/ops/conv2d_resample.py index cd4750744..f6359a432 --- a/torch_utils/ops/conv2d_resample.py +++ b/stylegan2_ada_pytorch/torch_utils/ops/conv2d_resample.py @@ -105,19 +105,19 @@ def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve. 
if kw == 1 and kh == 1 and (down > 1 and up == 1): - x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter) + x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0, px1, py0, py1], flip_filter=flip_filter) x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) return x # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample. if kw == 1 and kh == 1 and (up > 1 and down == 1): x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) - x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter) + x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0, px1, py0, py1], gain=up ** 2, flip_filter=flip_filter) return x # Fast path: downsampling only => use strided convolution. if down > 1 and up == 1: - x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter) + x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0, px1, py0, py1], flip_filter=flip_filter) x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight) return x @@ -136,7 +136,7 @@ def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight pxt = max(min(-px0, -px1), 0) pyt = max(min(-py0, -py1), 0) x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight)) - x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter) + x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0 + pxt, px1 + pxt, py0 + pyt, py1 + pyt], gain=up ** 2, flip_filter=flip_filter) if down > 1: x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) return x @@ -147,7 +147,7 @@ def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight) # Fallback: Generic reference implementation. 
- x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter) + x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0, px1, py0, py1], gain=up ** 2, flip_filter=flip_filter) x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) if down > 1: x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) diff --git a/torch_utils/ops/fma.py b/stylegan2_ada_pytorch/torch_utils/ops/fma.py old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/fma.py rename to stylegan2_ada_pytorch/torch_utils/ops/fma.py diff --git a/torch_utils/ops/grid_sample_gradfix.py b/stylegan2_ada_pytorch/torch_utils/ops/grid_sample_gradfix.py old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/grid_sample_gradfix.py rename to stylegan2_ada_pytorch/torch_utils/ops/grid_sample_gradfix.py diff --git a/torch_utils/ops/upfirdn2d.cpp b/stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.cpp old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/upfirdn2d.cpp rename to stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.cpp diff --git a/torch_utils/ops/upfirdn2d.cu b/stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.cu old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/upfirdn2d.cu rename to stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.cu diff --git a/torch_utils/ops/upfirdn2d.h b/stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.h old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/upfirdn2d.h rename to stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.h diff --git a/torch_utils/ops/upfirdn2d.py b/stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.py old mode 100755 new mode 100644 similarity index 100% rename from torch_utils/ops/upfirdn2d.py rename to stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.py diff --git a/torch_utils/persistence.py 
b/stylegan2_ada_pytorch/torch_utils/persistence.py old mode 100755 new mode 100644 similarity index 99% rename from torch_utils/persistence.py rename to stylegan2_ada_pytorch/torch_utils/persistence.py index 0186cfd97..eabf88e3b --- a/torch_utils/persistence.py +++ b/stylegan2_ada_pytorch/torch_utils/persistence.py @@ -20,7 +20,7 @@ import copy import uuid import types -import dnnlib +from stylegan2_ada_pytorch import dnnlib #---------------------------------------------------------------------------- diff --git a/torch_utils/training_stats.py b/stylegan2_ada_pytorch/torch_utils/training_stats.py old mode 100755 new mode 100644 similarity index 99% rename from torch_utils/training_stats.py rename to stylegan2_ada_pytorch/torch_utils/training_stats.py index 26f467f9e..88745f25d --- a/torch_utils/training_stats.py +++ b/stylegan2_ada_pytorch/torch_utils/training_stats.py @@ -14,7 +14,7 @@ import re import numpy as np import torch -import dnnlib +from .. import dnnlib from . import misc diff --git a/train.py b/stylegan2_ada_pytorch/train.py old mode 100755 new mode 100644 similarity index 98% rename from train.py rename to stylegan2_ada_pytorch/train.py index 8d81b3f18..e32b5e08a --- a/train.py +++ b/stylegan2_ada_pytorch/train.py @@ -15,12 +15,13 @@ import json import tempfile import torch -import dnnlib +from stylegan2_ada_pytorch import dnnlib + +from stylegan2_ada_pytorch.training import training_loop +from stylegan2_ada_pytorch.metrics import metric_main +from stylegan2_ada_pytorch.torch_utils import training_stats +from stylegan2_ada_pytorch.torch_utils import custom_ops -from training import training_loop -from metrics import metric_main -from torch_utils import training_stats -from torch_utils import custom_ops #---------------------------------------------------------------------------- @@ -182,8 +183,8 @@ def setup_training_loop_kwargs( args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = 256 # clamp activations to avoid float16 overflow 
args.D_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd - args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8) - args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8) + args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0, 0.99], eps=1e-8) + args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0, 0.99], eps=1e-8) - args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss', r1_gamma=spec.gamma) + args.loss_kwargs = dnnlib.EasyDict(class_name='stylegan2_ada_pytorch.training.loss.StyleGAN2Loss', r1_gamma=spec.gamma) args.total_kimg = spec.kimg diff --git a/training/__init__.py b/stylegan2_ada_pytorch/training/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from training/__init__.py rename to stylegan2_ada_pytorch/training/__init__.py diff --git a/training/augment.py b/stylegan2_ada_pytorch/training/augment.py old mode 100755 new mode 100644 similarity index 98% rename from training/augment.py rename to stylegan2_ada_pytorch/training/augment.py index 3efbf1270..6ccbb648a --- a/training/augment.py +++ b/stylegan2_ada_pytorch/training/augment.py @@ -9,11 +9,9 @@ import numpy as np import scipy.signal import torch -from torch_utils import persistence -from torch_utils import misc -from torch_utils.ops import upfirdn2d -from torch_utils.ops import grid_sample_gradfix -from torch_utils.ops import conv2d_gradfix +from stylegan2_ada_pytorch.torch_utils import persistence, misc +from stylegan2_ada_pytorch.torch_utils.ops import grid_sample_gradfix, upfirdn2d +from stylegan2_ada_pytorch.torch_utils.ops import conv2d_gradfix #---------------------------------------------------------------------------- # Coefficients of various wavelet decomposition low-pass filters.
@@ -279,7 +277,7 @@ def forward(self, images, debug_percentile=None): margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1] margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device) margin = margin.max(misc.constant([0, 0] * 2, device=device)) - margin = margin.min(misc.constant([width-1, height-1] * 2, device=device)) + margin = margin.min(misc.constant([width - 1, height - 1] * 2, device=device)) mx0, my0, mx1, my1 = margin.ceil().to(torch.int32) # Pad image and adjust origin. @@ -298,7 +296,7 @@ def forward(self, images, debug_percentile=None): images = grid_sample_gradfix.grid_sample(images, grid) # Downsample and crop. - images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True) + images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad * 2, flip_filter=True) # -------------------------------------------- # Select parameters for color transformations. @@ -395,8 +393,8 @@ def forward(self, images, debug_percentile=None): p = self.Hz_fbank.shape[1] // 2 images = images.reshape([1, batch_size * num_channels, height, width]) images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect') - images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels) - images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels) + images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size * num_channels) + images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size * num_channels) images = images.reshape([batch_size, num_channels, height, width]) # ------------------------ diff --git a/training/dataset.py b/stylegan2_ada_pytorch/training/dataset.py old mode 100755 new mode 100644 similarity index 99% rename from training/dataset.py rename to stylegan2_ada_pytorch/training/dataset.py index 
82dcabadd..e9d378aaa --- a/training/dataset.py +++ b/stylegan2_ada_pytorch/training/dataset.py @@ -12,7 +12,7 @@ import PIL.Image import json import torch -import dnnlib +from stylegan2_ada_pytorch import dnnlib try: import pyspng diff --git a/training/loss.py b/stylegan2_ada_pytorch/training/loss.py old mode 100755 new mode 100644 similarity index 97% rename from training/loss.py rename to stylegan2_ada_pytorch/training/loss.py index b87de446c..d952518c8 --- a/training/loss.py +++ b/stylegan2_ada_pytorch/training/loss.py @@ -8,9 +8,9 @@ import numpy as np import torch -from torch_utils import training_stats -from torch_utils import misc -from torch_utils.ops import conv2d_gradfix +from stylegan2_ada_pytorch.torch_utils import training_stats +from stylegan2_ada_pytorch.torch_utils import misc +from stylegan2_ada_pytorch.torch_utils.ops import conv2d_gradfix #---------------------------------------------------------------------------- diff --git a/training/networks.py b/stylegan2_ada_pytorch/training/networks.py old mode 100755 new mode 100644 similarity index 99% rename from training/networks.py rename to stylegan2_ada_pytorch/training/networks.py index b046eba2c..ce68908e6 --- a/training/networks.py +++ b/stylegan2_ada_pytorch/training/networks.py @@ -8,12 +8,10 @@ import numpy as np import torch -from torch_utils import misc -from torch_utils import persistence -from torch_utils.ops import conv2d_resample -from torch_utils.ops import upfirdn2d -from torch_utils.ops import bias_act -from torch_utils.ops import fma +from stylegan2_ada_pytorch.torch_utils import persistence, misc +from stylegan2_ada_pytorch.torch_utils.ops import conv2d_resample, upfirdn2d, fma +from stylegan2_ada_pytorch.torch_utils.ops import bias_act + #---------------------------------------------------------------------------- diff --git a/training/training_loop.py b/stylegan2_ada_pytorch/training/training_loop.py old mode 100755 new mode 100644 similarity index 95% rename from 
training/training_loop.py rename to stylegan2_ada_pytorch/training/training_loop.py index 14836ad2e..8b6457651 --- a/training/training_loop.py +++ b/stylegan2_ada_pytorch/training/training_loop.py @@ -15,14 +15,14 @@ import PIL.Image import numpy as np import torch -import dnnlib -from torch_utils import misc -from torch_utils import training_stats -from torch_utils.ops import conv2d_gradfix -from torch_utils.ops import grid_sample_gradfix +from stylegan2_ada_pytorch.torch_utils import misc +from stylegan2_ada_pytorch.torch_utils import training_stats +from stylegan2_ada_pytorch.torch_utils.ops import conv2d_gradfix +from stylegan2_ada_pytorch.torch_utils.ops import grid_sample_gradfix + +from stylegan2_ada_pytorch import legacy, dnnlib +from stylegan2_ada_pytorch.metrics import metric_main -import legacy -from metrics import metric_main #---------------------------------------------------------------------------- @@ -197,15 +197,15 @@ def training_loop( for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]: if reg_interval is None: opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer - phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)] + phases += [dnnlib.EasyDict(name=name + 'both', module=module, opt=opt, interval=1)] else: # Lazy regularization. 
mb_ratio = reg_interval / (reg_interval + 1) opt_kwargs = dnnlib.EasyDict(opt_kwargs) opt_kwargs.lr = opt_kwargs.lr * mb_ratio opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas] opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer - phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)] - phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)] + phases += [dnnlib.EasyDict(name=name + 'main', module=module, opt=opt, interval=1)] + phases += [dnnlib.EasyDict(name=name + 'reg', module=module, opt=opt, interval=reg_interval)] for phase in phases: phase.start_event = None phase.end_event = None @@ -328,8 +328,8 @@ def training_loop( fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"] fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"] fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"] - fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"] - fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"] + fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2 ** 30):<6.2f}"] + fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2 ** 30):<6.2f}"] torch.cuda.reset_peak_memory_stats() fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"] training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60)) @@ -372,7 +372,7 @@ def training_loop( print('Evaluating 
metrics...') for metric in metrics: result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'], - dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device) + dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device) if rank == 0: metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl) stats_metrics.update(result_dict.results)