From dd06bf3b91cd851bcc07825c207a5bddc140cfa2 Mon Sep 17 00:00:00 2001
From: Yulong Lin <linyulong97@gmail.com>
Date: Mon, 6 Sep 2021 20:53:47 +0100
Subject: [PATCH 1/3] package code for pip install

---
 README.md                                     |  2 +-
 pyproject.toml                                |  6 +++
 setup.cfg                                     | 30 +++++++++++++
 stylegan2_ada_pytorch/__init__.py             |  0
 .../calc_metrics.py                           | 22 +++++-----
 .../dataset_tool.py                           |  0
 .../dnnlib}/__init__.py                       |  0
 .../dnnlib}/util.py                           |  0
 .../generate.py                               |  6 +--
 legacy.py => stylegan2_ada_pytorch/legacy.py  | 11 ++---
 .../metrics}/__init__.py                      |  0
 .../metrics}/frechet_inception_distance.py    |  0
 .../metrics}/inception_score.py               |  0
 .../metrics}/kernel_inception_distance.py     |  0
 .../metrics}/metric_main.py                   |  4 +-
 .../metrics}/metric_utils.py                  | 11 ++---
 .../metrics}/perceptual_path_length.py        |  4 +-
 .../metrics}/precision_recall.py              |  0
 .../projector.py                              |  8 ++--
 .../style_mixing.py                           |  6 +--
 .../torch_utils}/__init__.py                  |  0
 .../torch_utils}/custom_ops.py                |  0
 .../torch_utils}/misc.py                      |  2 +-
 .../torch_utils}/ops/__init__.py              |  0
 .../torch_utils}/ops/bias_act.cpp             |  0
 .../torch_utils}/ops/bias_act.cu              |  0
 .../torch_utils}/ops/bias_act.h               |  0
 .../torch_utils}/ops/bias_act.py              | 20 ++++-----
 .../torch_utils}/ops/conv2d_gradfix.py        |  0
 .../torch_utils}/ops/conv2d_resample.py       | 10 ++---
 .../torch_utils}/ops/fma.py                   |  0
 .../torch_utils}/ops/grid_sample_gradfix.py   |  0
 .../torch_utils}/ops/upfirdn2d.cpp            |  0
 .../torch_utils}/ops/upfirdn2d.cu             |  0
 .../torch_utils}/ops/upfirdn2d.h              |  0
 .../torch_utils}/ops/upfirdn2d.py             |  0
 .../torch_utils}/persistence.py               |  2 +-
 .../torch_utils}/training_stats.py            |  2 +-
 train.py => stylegan2_ada_pytorch/train.py    | 21 ++++-----
 .../training}/__init__.py                     |  0
 .../training}/augment.py                      | 16 +++----
 .../training}/dataset.py                      |  2 +-
 .../training}/loss.py                         |  6 +--
 .../training}/networks.py                     | 10 ++---
 .../training}/training_loop.py                | 44 +++++++++----------
 45 files changed, 139 insertions(+), 106 deletions(-)
 create mode 100644 pyproject.toml
 create mode 100644 setup.cfg
 create mode 100644 stylegan2_ada_pytorch/__init__.py
 rename calc_metrics.py => stylegan2_ada_pytorch/calc_metrics.py (91%)
 mode change 100755 => 100644
 rename dataset_tool.py => stylegan2_ada_pytorch/dataset_tool.py (100%)
 mode change 100755 => 100644
 rename {dnnlib => stylegan2_ada_pytorch/dnnlib}/__init__.py (100%)
 mode change 100755 => 100644
 rename {dnnlib => stylegan2_ada_pytorch/dnnlib}/util.py (100%)
 mode change 100755 => 100644
 rename generate.py => stylegan2_ada_pytorch/generate.py (97%)
 mode change 100755 => 100644
 rename legacy.py => stylegan2_ada_pytorch/legacy.py (98%)
 mode change 100755 => 100644
 rename {metrics => stylegan2_ada_pytorch/metrics}/__init__.py (100%)
 mode change 100755 => 100644
 rename {metrics => stylegan2_ada_pytorch/metrics}/frechet_inception_distance.py (100%)
 mode change 100755 => 100644
 rename {metrics => stylegan2_ada_pytorch/metrics}/inception_score.py (100%)
 mode change 100755 => 100644
 rename {metrics => stylegan2_ada_pytorch/metrics}/kernel_inception_distance.py (100%)
 mode change 100755 => 100644
 rename {metrics => stylegan2_ada_pytorch/metrics}/metric_main.py (98%)
 mode change 100755 => 100644
 rename {metrics => stylegan2_ada_pytorch/metrics}/metric_utils.py (96%)
 mode change 100755 => 100644
 rename {metrics => stylegan2_ada_pytorch/metrics}/perceptual_path_length.py (97%)
 mode change 100755 => 100644
 rename {metrics => stylegan2_ada_pytorch/metrics}/precision_recall.py (100%)
 mode change 100755 => 100644
 rename projector.py => stylegan2_ada_pytorch/projector.py (97%)
 mode change 100755 => 100644
 rename style_mixing.py => stylegan2_ada_pytorch/style_mixing.py (97%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/__init__.py (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/custom_ops.py (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/misc.py (99%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/__init__.py (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/bias_act.cpp (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/bias_act.cu (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/bias_act.h (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/bias_act.py (87%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/conv2d_gradfix.py (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/conv2d_resample.py (93%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/fma.py (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/grid_sample_gradfix.py (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/upfirdn2d.cpp (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/upfirdn2d.cu (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/upfirdn2d.h (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/ops/upfirdn2d.py (100%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/persistence.py (99%)
 mode change 100755 => 100644
 rename {torch_utils => stylegan2_ada_pytorch/torch_utils}/training_stats.py (99%)
 mode change 100755 => 100644
 rename train.py => stylegan2_ada_pytorch/train.py (97%)
 mode change 100755 => 100644
 rename {training => stylegan2_ada_pytorch/training}/__init__.py (100%)
 mode change 100755 => 100644
 rename {training => stylegan2_ada_pytorch/training}/augment.py (98%)
 mode change 100755 => 100644
 rename {training => stylegan2_ada_pytorch/training}/dataset.py (99%)
 mode change 100755 => 100644
 rename {training => stylegan2_ada_pytorch/training}/loss.py (97%)
 mode change 100755 => 100644
 rename {training => stylegan2_ada_pytorch/training}/networks.py (99%)
 mode change 100755 => 100644
 rename {training => stylegan2_ada_pytorch/training}/training_loop.py (89%)
 mode change 100755 => 100644

diff --git a/README.md b/README.md
index 092b52bb4..08145b1ad 100755
--- a/README.md
+++ b/README.md
@@ -151,7 +151,7 @@ w = G.mapping(z, c, truncation_psi=0.5, truncation_cutoff=8)
 img = G.synthesis(w, noise_mode='const', force_fp32=True)
 ```
 
-Please refer to [`generate.py`](./generate.py), [`style_mixing.py`](./style_mixing.py), and [`projector.py`](./projector.py) for further examples.
+Please refer to [`generate.py`](stylegan2_ada_pytorch/generate.py), [`style_mixing.py`](stylegan2_ada_pytorch/style_mixing.py), and [`projector.py`](stylegan2_ada_pytorch/projector.py) for further examples.
 
 ## Preparing datasets
 
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..0ad39d0b7
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+    "setuptools>=42",
+    "wheel"
+]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 000000000..b91d8783d
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,30 @@
+[metadata]
+name = stylegan2-ada-pytorch
+version = 1.0.0
+description = StyleGAN2-ADA - Official PyTorch implementation
+long_description = file: README.md
+long_description_content_type = text/markdown
+url = https://github.com/NVlabs/stylegan2-ada-pytorch
+project_urls =
+    Bug Tracker = https://github.com/NVlabs/stylegan2-ada-pytorch/issues
+classifiers =
+    Programming Language :: Python :: 3
+    License :: OSI Approved :: MIT License
+    Operating System :: OS Independent
+
+[options]
+package_dir =
+    = .
+packages = find:
+python_requires = >=3.6
+install_requires =
+    torch >=1.7.0
+    click
+    requests
+    tqdm
+    pyspng
+    ninja
+    imageio-ffmpeg ==0.4.3
+
+[options.packages.find]
+where = .
diff --git a/stylegan2_ada_pytorch/__init__.py b/stylegan2_ada_pytorch/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/calc_metrics.py b/stylegan2_ada_pytorch/calc_metrics.py
old mode 100755
new mode 100644
similarity index 91%
rename from calc_metrics.py
rename to stylegan2_ada_pytorch/calc_metrics.py
index 03e828195..e7300c057
--- a/calc_metrics.py
+++ b/stylegan2_ada_pytorch/calc_metrics.py
@@ -14,19 +14,17 @@
 import tempfile
 import copy
 import torch
-import dnnlib
 
-import legacy
-from metrics import metric_main
-from metrics import metric_utils
-from torch_utils import training_stats
-from torch_utils import custom_ops
-from torch_utils import misc
+from stylegan2_ada_pytorch import legacy, dnnlib
+from stylegan2_ada_pytorch.metrics import metric_main, metric_utils
+from stylegan2_ada_pytorch.torch_utils import training_stats
+from stylegan2_ada_pytorch.torch_utils import custom_ops, misc
+
 
 #----------------------------------------------------------------------------
 
 def subprocess_fn(rank, args, temp_dir):
-    dnnlib.util.Logger(should_flush=True)
+    dnnlib.util.Logger(should_flush=True)
 
     # Init torch.distributed.
     if args.num_gpus > 1:
@@ -61,7 +59,7 @@ def subprocess_fn(rank, args, temp_dir):
             print(f'Calculating {metric}...')
         progress = metric_utils.ProgressMonitor(verbose=args.verbose)
         result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs,
-            num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
+                                              num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
         if rank == 0:
             metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
         if rank == 0 and args.verbose:
@@ -128,7 +126,7 @@ def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose):
         ppl_zend     Perceptual path length in Z at path endpoints against cropped image.
         ppl_wend     Perceptual path length in W at path endpoints against cropped image.
     """
-    dnnlib.util.Logger(should_flush=True)
+    dnnlib.util.Logger(should_flush=True)
 
     # Validate arguments.
     args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose)
@@ -138,11 +136,11 @@ def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose):
         ctx.fail('--gpus must be at least 1')
 
     # Load network.
-    if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
+    if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
         ctx.fail('--network must point to a file or URL')
     if args.verbose:
         print(f'Loading network from "{network_pkl}"...')
-    with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
+    with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
         network_dict = legacy.load_network_pkl(f)
         args.G = network_dict['G_ema'] # subclass of torch.nn.Module
 
diff --git a/dataset_tool.py b/stylegan2_ada_pytorch/dataset_tool.py
old mode 100755
new mode 100644
similarity index 100%
rename from dataset_tool.py
rename to stylegan2_ada_pytorch/dataset_tool.py
diff --git a/dnnlib/__init__.py b/stylegan2_ada_pytorch/dnnlib/__init__.py
old mode 100755
new mode 100644
similarity index 100%
rename from dnnlib/__init__.py
rename to stylegan2_ada_pytorch/dnnlib/__init__.py
diff --git a/dnnlib/util.py b/stylegan2_ada_pytorch/dnnlib/util.py
old mode 100755
new mode 100644
similarity index 100%
rename from dnnlib/util.py
rename to stylegan2_ada_pytorch/dnnlib/util.py
diff --git a/generate.py b/stylegan2_ada_pytorch/generate.py
old mode 100755
new mode 100644
similarity index 97%
rename from generate.py
rename to stylegan2_ada_pytorch/generate.py
index f7f961931..7341b6379
--- a/generate.py
+++ b/stylegan2_ada_pytorch/generate.py
@@ -13,12 +13,12 @@
 from typing import List, Optional
 
 import click
-import dnnlib
 import numpy as np
 import PIL.Image
 import torch
 
-import legacy
+from stylegan2_ada_pytorch import legacy, dnnlib
+
 
 #----------------------------------------------------------------------------
 
@@ -80,7 +80,7 @@ def generate_images(
 
     print('Loading networks from "%s"...' % network_pkl)
     device = torch.device('cuda')
-    with dnnlib.util.open_url(network_pkl) as f:
+    with dnnlib.util.open_url(network_pkl) as f:
         G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
 
     os.makedirs(outdir, exist_ok=True)
diff --git a/legacy.py b/stylegan2_ada_pytorch/legacy.py
old mode 100755
new mode 100644
similarity index 98%
rename from legacy.py
rename to stylegan2_ada_pytorch/legacy.py
index 9387d79f2..7914efccd
--- a/legacy.py
+++ b/stylegan2_ada_pytorch/legacy.py
@@ -12,8 +12,9 @@
 import copy
 import numpy as np
 import torch
-import dnnlib
-from torch_utils import misc
+from stylegan2_ada_pytorch import dnnlib
+from stylegan2_ada_pytorch.torch_utils import misc
+
 
 #----------------------------------------------------------------------------
 
@@ -165,7 +166,7 @@ def kwarg(tf_name, default=None, none=None):
     #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
 
     # Convert params.
-    from training import networks
+    from stylegan2_ada_pytorch.training import networks
     G = networks.Generator(**kwargs).eval().requires_grad_(False)
     # pylint: disable=unnecessary-lambda
     _populate_module_params(G,
@@ -262,7 +263,7 @@ def kwarg(tf_name, default=None):
     #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
 
     # Convert params.
-    from training import networks
+    from stylegan2_ada_pytorch.training import networks
     D = networks.Discriminator(**kwargs).eval().requires_grad_(False)
     # pylint: disable=unnecessary-lambda
     _populate_module_params(D,
@@ -305,7 +306,7 @@ def convert_network_pickle(source, dest, force_fp16):
         --dest=stylegan2-cat-config-f.pkl
     """
     print(f'Loading "{source}"...')
-    with dnnlib.util.open_url(source) as f:
+    with dnnlib.util.open_url(source) as f:
         data = load_network_pkl(f, force_fp16=force_fp16)
     print(f'Saving "{dest}"...')
     with open(dest, 'wb') as f:
diff --git a/metrics/__init__.py b/stylegan2_ada_pytorch/metrics/__init__.py
old mode 100755
new mode 100644
similarity index 100%
rename from metrics/__init__.py
rename to stylegan2_ada_pytorch/metrics/__init__.py
diff --git a/metrics/frechet_inception_distance.py b/stylegan2_ada_pytorch/metrics/frechet_inception_distance.py
old mode 100755
new mode 100644
similarity index 100%
rename from metrics/frechet_inception_distance.py
rename to stylegan2_ada_pytorch/metrics/frechet_inception_distance.py
diff --git a/metrics/inception_score.py b/stylegan2_ada_pytorch/metrics/inception_score.py
old mode 100755
new mode 100644
similarity index 100%
rename from metrics/inception_score.py
rename to stylegan2_ada_pytorch/metrics/inception_score.py
diff --git a/metrics/kernel_inception_distance.py b/stylegan2_ada_pytorch/metrics/kernel_inception_distance.py
old mode 100755
new mode 100644
similarity index 100%
rename from metrics/kernel_inception_distance.py
rename to stylegan2_ada_pytorch/metrics/kernel_inception_distance.py
diff --git a/metrics/metric_main.py b/stylegan2_ada_pytorch/metrics/metric_main.py
old mode 100755
new mode 100644
similarity index 98%
rename from metrics/metric_main.py
rename to stylegan2_ada_pytorch/metrics/metric_main.py
index 738804a6f..d81beba02
--- a/metrics/metric_main.py
+++ b/stylegan2_ada_pytorch/metrics/metric_main.py
@@ -10,7 +10,7 @@
 import time
 import json
 import torch
-import dnnlib
+from .. import dnnlib
 
 from . import metric_utils
 from . import frechet_inception_distance
@@ -58,7 +58,7 @@ def calc_metric(metric, **kwargs): # See metric_utils.MetricOptions for the full
         results         = dnnlib.EasyDict(results),
         metric          = metric,
         total_time      = total_time,
-        total_time_str  = dnnlib.util.format_time(total_time),
+        total_time_str  = dnnlib.util.format_time(total_time),
         num_gpus        = opts.num_gpus,
     )
 
diff --git a/metrics/metric_utils.py b/stylegan2_ada_pytorch/metrics/metric_utils.py
old mode 100755
new mode 100644
similarity index 96%
rename from metrics/metric_utils.py
rename to stylegan2_ada_pytorch/metrics/metric_utils.py
index 16de1eae3..70f1588d3
--- a/metrics/metric_utils.py
+++ b/stylegan2_ada_pytorch/metrics/metric_utils.py
@@ -14,7 +14,8 @@
 import uuid
 import numpy as np
 import torch
-import dnnlib
+from stylegan2_ada_pytorch import dnnlib
+
 
 #----------------------------------------------------------------------------
 
@@ -44,7 +45,7 @@ def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, ve
         is_leader = (rank == 0)
         if not is_leader and num_gpus > 1:
             torch.distributed.barrier() # leader goes first
-        with dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
+        with dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
             _feature_detector_cache[key] = torch.jit.load(f).eval().to(device)
         if is_leader and num_gpus > 1:
             torch.distributed.barrier() # others follow
@@ -156,7 +157,7 @@ def update(self, cur_items):
         total_time = cur_time - self.start_time
         time_per_item = (cur_time - self.batch_time) / max(cur_items - self.batch_items, 1)
         if (self.verbose) and (self.tag is not None):
-            print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item*1e3:.2f}')
+            print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item * 1e3:.2f}')
         self.batch_time = cur_time
         self.batch_items = cur_items
 
@@ -178,7 +179,7 @@ def sub(self, tag=None, num_items=None, flush_interval=1000, rel_lo=0, rel_hi=1)
 #----------------------------------------------------------------------------
 
 def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, **stats_kwargs):
-    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
+    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
     if data_loader_kwargs is None:
         data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
 
@@ -236,7 +237,7 @@ def compute_feature_stats_for_generator(opts, detector_url, detector_kwargs, rel
 
     # Setup generator and load labels.
     G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
-    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
+    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
 
     # Image generation func.
     def run_generator(z, c):
diff --git a/metrics/perceptual_path_length.py b/stylegan2_ada_pytorch/metrics/perceptual_path_length.py
old mode 100755
new mode 100644
similarity index 97%
rename from metrics/perceptual_path_length.py
rename to stylegan2_ada_pytorch/metrics/perceptual_path_length.py
index d070f45a0..b14223fed
--- a/metrics/perceptual_path_length.py
+++ b/stylegan2_ada_pytorch/metrics/perceptual_path_length.py
@@ -14,7 +14,7 @@
 import copy
 import numpy as np
 import torch
-import dnnlib
+from .. import dnnlib
 from . import metric_utils
 
 #----------------------------------------------------------------------------
@@ -93,7 +93,7 @@ def forward(self, c):
 #----------------------------------------------------------------------------
 
 def compute_ppl(opts, num_samples, epsilon, space, sampling, crop, batch_size, jit=False):
-    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
+    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
     vgg16_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
     vgg16 = metric_utils.get_feature_detector(vgg16_url, num_gpus=opts.num_gpus, rank=opts.rank, verbose=opts.progress.verbose)
 
diff --git a/metrics/precision_recall.py b/stylegan2_ada_pytorch/metrics/precision_recall.py
old mode 100755
new mode 100644
similarity index 100%
rename from metrics/precision_recall.py
rename to stylegan2_ada_pytorch/metrics/precision_recall.py
diff --git a/projector.py b/stylegan2_ada_pytorch/projector.py
old mode 100755
new mode 100644
similarity index 97%
rename from projector.py
rename to stylegan2_ada_pytorch/projector.py
index 36041a086..948cdf450
--- a/projector.py
+++ b/stylegan2_ada_pytorch/projector.py
@@ -19,8 +19,8 @@
 import torch
 import torch.nn.functional as F
 
-import dnnlib
-import legacy
+from stylegan2_ada_pytorch import legacy, dnnlib
+
 
 def project(
     G,
@@ -58,7 +58,7 @@ def logprint(*args):
 
     # Load VGG16 feature detector.
     url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
-    with dnnlib.util.open_url(url) as f:
+    with dnnlib.util.open_url(url) as f:
         vgg16 = torch.jit.load(f).eval().to(device)
 
     # Features for target image.
@@ -161,7 +161,7 @@ def run_projection(
     # Load networks.
     print('Loading networks from "%s"...' % network_pkl)
     device = torch.device('cuda')
-    with dnnlib.util.open_url(network_pkl) as fp:
+    with dnnlib.util.open_url(network_pkl) as fp:
         G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore
 
     # Load target image.
diff --git a/style_mixing.py b/stylegan2_ada_pytorch/style_mixing.py
old mode 100755
new mode 100644
similarity index 97%
rename from style_mixing.py
rename to stylegan2_ada_pytorch/style_mixing.py
index c47bebbc4..1a2612137
--- a/style_mixing.py
+++ b/stylegan2_ada_pytorch/style_mixing.py
@@ -13,12 +13,12 @@
 from typing import List
 
 import click
-import dnnlib
 import numpy as np
 import PIL.Image
 import torch
 
-import legacy
+from stylegan2_ada_pytorch import legacy, dnnlib
+
 
 #----------------------------------------------------------------------------
 
@@ -61,7 +61,7 @@ def generate_style_mix(
     """
     print('Loading networks from "%s"...' % network_pkl)
     device = torch.device('cuda')
-    with dnnlib.util.open_url(network_pkl) as f:
+    with dnnlib.util.open_url(network_pkl) as f:
         G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
 
     os.makedirs(outdir, exist_ok=True)
diff --git a/torch_utils/__init__.py b/stylegan2_ada_pytorch/torch_utils/__init__.py
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/__init__.py
rename to stylegan2_ada_pytorch/torch_utils/__init__.py
diff --git a/torch_utils/custom_ops.py b/stylegan2_ada_pytorch/torch_utils/custom_ops.py
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/custom_ops.py
rename to stylegan2_ada_pytorch/torch_utils/custom_ops.py
diff --git a/torch_utils/misc.py b/stylegan2_ada_pytorch/torch_utils/misc.py
old mode 100755
new mode 100644
similarity index 99%
rename from torch_utils/misc.py
rename to stylegan2_ada_pytorch/torch_utils/misc.py
index 7829f4d9f..fb27870de
--- a/torch_utils/misc.py
+++ b/stylegan2_ada_pytorch/torch_utils/misc.py
@@ -11,7 +11,7 @@
 import numpy as np
 import torch
 import warnings
-import dnnlib
+from stylegan2_ada_pytorch import dnnlib
 
 #----------------------------------------------------------------------------
 # Cached construction of constant tensors. Avoids CPU=>GPU copy when the
diff --git a/torch_utils/ops/__init__.py b/stylegan2_ada_pytorch/torch_utils/ops/__init__.py
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/__init__.py
rename to stylegan2_ada_pytorch/torch_utils/ops/__init__.py
diff --git a/torch_utils/ops/bias_act.cpp b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.cpp
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/bias_act.cpp
rename to stylegan2_ada_pytorch/torch_utils/ops/bias_act.cpp
diff --git a/torch_utils/ops/bias_act.cu b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.cu
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/bias_act.cu
rename to stylegan2_ada_pytorch/torch_utils/ops/bias_act.cu
diff --git a/torch_utils/ops/bias_act.h b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.h
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/bias_act.h
rename to stylegan2_ada_pytorch/torch_utils/ops/bias_act.h
diff --git a/torch_utils/ops/bias_act.py b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.py
old mode 100755
new mode 100644
similarity index 87%
rename from torch_utils/ops/bias_act.py
rename to stylegan2_ada_pytorch/torch_utils/ops/bias_act.py
index 4bcb409a8..4635d3eb1
--- a/torch_utils/ops/bias_act.py
+++ b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.py
@@ -12,7 +12,7 @@
 import warnings
 import numpy as np
 import torch
-import dnnlib
+from ... import dnnlib
 import traceback
 
 from .. import custom_ops
@@ -21,15 +21,15 @@
 #----------------------------------------------------------------------------
 
 activation_funcs = {
-    'linear':   dnnlib.EasyDict(func=lambda x, **_:         x,                                          def_alpha=0,    def_gain=1,             cuda_idx=1, ref='',  has_2nd_grad=False),
-    'relu':     dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.relu(x),                def_alpha=0,    def_gain=np.sqrt(2),    cuda_idx=2, ref='y', has_2nd_grad=False),
-    'lrelu':    dnnlib.EasyDict(func=lambda x, alpha, **_:  torch.nn.functional.leaky_relu(x, alpha),   def_alpha=0.2,  def_gain=np.sqrt(2),    cuda_idx=3, ref='y', has_2nd_grad=False),
-    'tanh':     dnnlib.EasyDict(func=lambda x, **_:         torch.tanh(x),                              def_alpha=0,    def_gain=1,             cuda_idx=4, ref='y', has_2nd_grad=True),
-    'sigmoid':  dnnlib.EasyDict(func=lambda x, **_:         torch.sigmoid(x),                           def_alpha=0,    def_gain=1,             cuda_idx=5, ref='y', has_2nd_grad=True),
-    'elu':      dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.elu(x),                 def_alpha=0,    def_gain=1,             cuda_idx=6, ref='y', has_2nd_grad=True),
-    'selu':     dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.selu(x),                def_alpha=0,    def_gain=1,             cuda_idx=7, ref='y', has_2nd_grad=True),
-    'softplus': dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.softplus(x),            def_alpha=0,    def_gain=1,             cuda_idx=8, ref='y', has_2nd_grad=True),
-    'swish':    dnnlib.EasyDict(func=lambda x, **_:         torch.sigmoid(x) * x,                       def_alpha=0,    def_gain=np.sqrt(2),    cuda_idx=9, ref='x', has_2nd_grad=True),
+    'linear':   dnnlib.EasyDict(func=lambda x, **_:         x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
+    'relu':     dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
+    'lrelu':    dnnlib.EasyDict(func=lambda x, alpha, **_:  torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
+    'tanh':     dnnlib.EasyDict(func=lambda x, **_:         torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
+    'sigmoid':  dnnlib.EasyDict(func=lambda x, **_:         torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
+    'elu':      dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
+    'selu':     dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
+    'softplus': dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
+    'swish':    dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
 }
 
 #----------------------------------------------------------------------------
diff --git a/torch_utils/ops/conv2d_gradfix.py b/stylegan2_ada_pytorch/torch_utils/ops/conv2d_gradfix.py
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/conv2d_gradfix.py
rename to stylegan2_ada_pytorch/torch_utils/ops/conv2d_gradfix.py
diff --git a/torch_utils/ops/conv2d_resample.py b/stylegan2_ada_pytorch/torch_utils/ops/conv2d_resample.py
old mode 100755
new mode 100644
similarity index 93%
rename from torch_utils/ops/conv2d_resample.py
rename to stylegan2_ada_pytorch/torch_utils/ops/conv2d_resample.py
index cd4750744..f6359a432
--- a/torch_utils/ops/conv2d_resample.py
+++ b/stylegan2_ada_pytorch/torch_utils/ops/conv2d_resample.py
@@ -105,19 +105,19 @@ def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight
 
     # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
     if kw == 1 and kh == 1 and (down > 1 and up == 1):
-        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
+        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0, px1, py0, py1], flip_filter=flip_filter)
         x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
         return x
 
     # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
     if kw == 1 and kh == 1 and (up > 1 and down == 1):
         x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
-        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
+        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0, px1, py0, py1], gain=up ** 2, flip_filter=flip_filter)
         return x
 
     # Fast path: downsampling only => use strided convolution.
     if down > 1 and up == 1:
-        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
+        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0, px1, py0, py1], flip_filter=flip_filter)
         x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
         return x
 
@@ -136,7 +136,7 @@ def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight
         pxt = max(min(-px0, -px1), 0)
         pyt = max(min(-py0, -py1), 0)
         x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
-        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
+        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0 + pxt, px1 + pxt, py0 + pyt, py1 + pyt], gain=up ** 2, flip_filter=flip_filter)
         if down > 1:
             x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
         return x
@@ -147,7 +147,7 @@ def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight
             return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
 
     # Fallback: Generic reference implementation.
-    x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
+    x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0, px1, py0, py1], gain=up ** 2, flip_filter=flip_filter)
     x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
     if down > 1:
         x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
diff --git a/torch_utils/ops/fma.py b/stylegan2_ada_pytorch/torch_utils/ops/fma.py
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/fma.py
rename to stylegan2_ada_pytorch/torch_utils/ops/fma.py
diff --git a/torch_utils/ops/grid_sample_gradfix.py b/stylegan2_ada_pytorch/torch_utils/ops/grid_sample_gradfix.py
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/grid_sample_gradfix.py
rename to stylegan2_ada_pytorch/torch_utils/ops/grid_sample_gradfix.py
diff --git a/torch_utils/ops/upfirdn2d.cpp b/stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.cpp
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/upfirdn2d.cpp
rename to stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.cpp
diff --git a/torch_utils/ops/upfirdn2d.cu b/stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.cu
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/upfirdn2d.cu
rename to stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.cu
diff --git a/torch_utils/ops/upfirdn2d.h b/stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.h
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/upfirdn2d.h
rename to stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.h
diff --git a/torch_utils/ops/upfirdn2d.py b/stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.py
old mode 100755
new mode 100644
similarity index 100%
rename from torch_utils/ops/upfirdn2d.py
rename to stylegan2_ada_pytorch/torch_utils/ops/upfirdn2d.py
diff --git a/torch_utils/persistence.py b/stylegan2_ada_pytorch/torch_utils/persistence.py
old mode 100755
new mode 100644
similarity index 99%
rename from torch_utils/persistence.py
rename to stylegan2_ada_pytorch/torch_utils/persistence.py
index 0186cfd97..eabf88e3b
--- a/torch_utils/persistence.py
+++ b/stylegan2_ada_pytorch/torch_utils/persistence.py
@@ -20,7 +20,7 @@
 import copy
 import uuid
 import types
-import dnnlib
+from stylegan2_ada_pytorch import dnnlib
 
 #----------------------------------------------------------------------------
 
diff --git a/torch_utils/training_stats.py b/stylegan2_ada_pytorch/torch_utils/training_stats.py
old mode 100755
new mode 100644
similarity index 99%
rename from torch_utils/training_stats.py
rename to stylegan2_ada_pytorch/torch_utils/training_stats.py
index 26f467f9e..88745f25d
--- a/torch_utils/training_stats.py
+++ b/stylegan2_ada_pytorch/torch_utils/training_stats.py
@@ -14,7 +14,7 @@
 import re
 import numpy as np
 import torch
-import dnnlib
+from .. import dnnlib
 
 from . import misc
 
diff --git a/train.py b/stylegan2_ada_pytorch/train.py
old mode 100755
new mode 100644
similarity index 97%
rename from train.py
rename to stylegan2_ada_pytorch/train.py
index 8d81b3f18..c2e61c9eb
--- a/train.py
+++ b/stylegan2_ada_pytorch/train.py
@@ -15,12 +15,13 @@
 import json
 import tempfile
 import torch
-import dnnlib
+from stylegan2_ada_pytorch import dnnlib
+
+from stylegan2_ada_pytorch.training import training_loop
+from stylegan2_ada_pytorch.metrics import metric_main
+from stylegan2_ada_pytorch.torch_utils import training_stats
+from stylegan2_ada_pytorch.torch_utils import custom_ops
 
-from training import training_loop
-from metrics import metric_main
-from torch_utils import training_stats
-from torch_utils import custom_ops
 
 #----------------------------------------------------------------------------
 
@@ -107,7 +108,7 @@ def setup_training_loop_kwargs(
     args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False)
     args.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=3, prefetch_factor=2)
     try:
-        training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset
+        training_set = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset
         args.training_set_kwargs.resolution = training_set.resolution # be explicit about resolution
         args.training_set_kwargs.use_labels = training_set.has_labels # be explicit about labels
         args.training_set_kwargs.max_size = len(training_set) # be explicit about dataset size
@@ -182,8 +183,8 @@ def setup_training_loop_kwargs(
     args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = 256 # clamp activations to avoid float16 overflow
     args.D_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd
 
-    args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
-    args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
+    args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0, 0.99], eps=1e-8)
+    args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0, 0.99], eps=1e-8)
     args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss', r1_gamma=spec.gamma)
 
     args.total_kimg = spec.kimg
@@ -361,7 +362,7 @@ def setup_training_loop_kwargs(
 #----------------------------------------------------------------------------
 
 def subprocess_fn(rank, args, temp_dir):
-    dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)
+    stylegan2_ada_pytorch.dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)
 
     # Init torch.distributed.
     if args.num_gpus > 1:
@@ -479,7 +480,7 @@ def main(ctx, outdir, dry_run, **config_kwargs):
       lsundog256     LSUN Dog trained at 256x256 resolution.
       <PATH or URL>  Custom network pickle.
     """
-    dnnlib.util.Logger(should_flush=True)
+    stylegan2_ada_pytorch.dnnlib.util.Logger(should_flush=True)
 
     # Setup training options.
     try:
diff --git a/training/__init__.py b/stylegan2_ada_pytorch/training/__init__.py
old mode 100755
new mode 100644
similarity index 100%
rename from training/__init__.py
rename to stylegan2_ada_pytorch/training/__init__.py
diff --git a/training/augment.py b/stylegan2_ada_pytorch/training/augment.py
old mode 100755
new mode 100644
similarity index 98%
rename from training/augment.py
rename to stylegan2_ada_pytorch/training/augment.py
index 3efbf1270..6ccbb648a
--- a/training/augment.py
+++ b/stylegan2_ada_pytorch/training/augment.py
@@ -9,11 +9,9 @@
 import numpy as np
 import scipy.signal
 import torch
-from torch_utils import persistence
-from torch_utils import misc
-from torch_utils.ops import upfirdn2d
-from torch_utils.ops import grid_sample_gradfix
-from torch_utils.ops import conv2d_gradfix
+from stylegan2_ada_pytorch.torch_utils import persistence, misc
+from stylegan2_ada_pytorch.torch_utils.ops import grid_sample_gradfix, upfirdn2d
+from stylegan2_ada_pytorch.torch_utils.ops import conv2d_gradfix
 
 #----------------------------------------------------------------------------
 # Coefficients of various wavelet decomposition low-pass filters.
@@ -279,7 +277,7 @@ def forward(self, images, debug_percentile=None):
             margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1]
             margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device)
             margin = margin.max(misc.constant([0, 0] * 2, device=device))
-            margin = margin.min(misc.constant([width-1, height-1] * 2, device=device))
+            margin = margin.min(misc.constant([width - 1, height - 1] * 2, device=device))
             mx0, my0, mx1, my1 = margin.ceil().to(torch.int32)
 
             # Pad image and adjust origin.
@@ -298,7 +296,7 @@ def forward(self, images, debug_percentile=None):
             images = grid_sample_gradfix.grid_sample(images, grid)
 
             # Downsample and crop.
-            images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True)
+            images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad * 2, flip_filter=True)
 
         # --------------------------------------------
         # Select parameters for color transformations.
@@ -395,8 +393,8 @@ def forward(self, images, debug_percentile=None):
             p = self.Hz_fbank.shape[1] // 2
             images = images.reshape([1, batch_size * num_channels, height, width])
             images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect')
-            images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels)
-            images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels)
+            images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size * num_channels)
+            images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size * num_channels)
             images = images.reshape([batch_size, num_channels, height, width])
 
         # ------------------------
diff --git a/training/dataset.py b/stylegan2_ada_pytorch/training/dataset.py
old mode 100755
new mode 100644
similarity index 99%
rename from training/dataset.py
rename to stylegan2_ada_pytorch/training/dataset.py
index 82dcabadd..e9d378aaa
--- a/training/dataset.py
+++ b/stylegan2_ada_pytorch/training/dataset.py
@@ -12,7 +12,7 @@
 import PIL.Image
 import json
 import torch
-import dnnlib
+from stylegan2_ada_pytorch import dnnlib
 
 try:
     import pyspng
diff --git a/training/loss.py b/stylegan2_ada_pytorch/training/loss.py
old mode 100755
new mode 100644
similarity index 97%
rename from training/loss.py
rename to stylegan2_ada_pytorch/training/loss.py
index b87de446c..d952518c8
--- a/training/loss.py
+++ b/stylegan2_ada_pytorch/training/loss.py
@@ -8,9 +8,9 @@
 
 import numpy as np
 import torch
-from torch_utils import training_stats
-from torch_utils import misc
-from torch_utils.ops import conv2d_gradfix
+from stylegan2_ada_pytorch.torch_utils import training_stats
+from stylegan2_ada_pytorch.torch_utils import misc
+from stylegan2_ada_pytorch.torch_utils.ops import conv2d_gradfix
 
 #----------------------------------------------------------------------------
 
diff --git a/training/networks.py b/stylegan2_ada_pytorch/training/networks.py
old mode 100755
new mode 100644
similarity index 99%
rename from training/networks.py
rename to stylegan2_ada_pytorch/training/networks.py
index b046eba2c..ce68908e6
--- a/training/networks.py
+++ b/stylegan2_ada_pytorch/training/networks.py
@@ -8,12 +8,10 @@
 
 import numpy as np
 import torch
-from torch_utils import misc
-from torch_utils import persistence
-from torch_utils.ops import conv2d_resample
-from torch_utils.ops import upfirdn2d
-from torch_utils.ops import bias_act
-from torch_utils.ops import fma
+from stylegan2_ada_pytorch.torch_utils import persistence, misc
+from stylegan2_ada_pytorch.torch_utils.ops import conv2d_resample, upfirdn2d, fma
+from stylegan2_ada_pytorch.torch_utils.ops import bias_act
+
 
 #----------------------------------------------------------------------------
 
diff --git a/training/training_loop.py b/stylegan2_ada_pytorch/training/training_loop.py
old mode 100755
new mode 100644
similarity index 89%
rename from training/training_loop.py
rename to stylegan2_ada_pytorch/training/training_loop.py
index 14836ad2e..f32a8819e
--- a/training/training_loop.py
+++ b/stylegan2_ada_pytorch/training/training_loop.py
@@ -15,14 +15,14 @@
 import PIL.Image
 import numpy as np
 import torch
-import dnnlib
-from torch_utils import misc
-from torch_utils import training_stats
-from torch_utils.ops import conv2d_gradfix
-from torch_utils.ops import grid_sample_gradfix
+from stylegan2_ada_pytorch.torch_utils import misc
+from stylegan2_ada_pytorch.torch_utils import training_stats
+from stylegan2_ada_pytorch.torch_utils.ops import conv2d_gradfix
+from stylegan2_ada_pytorch.torch_utils.ops import grid_sample_gradfix
+
+from stylegan2_ada_pytorch import legacy, dnnlib
+from stylegan2_ada_pytorch.metrics import metric_main
 
-import legacy
-from metrics import metric_main
 
 #----------------------------------------------------------------------------
 
@@ -133,7 +133,7 @@ def training_loop(
     # Load training set.
     if rank == 0:
         print('Loading training set...')
-    training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
+    training_set = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
     training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
     training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
     if rank == 0:
@@ -147,14 +147,14 @@ def training_loop(
     if rank == 0:
         print('Constructing networks...')
     common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)
-    G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
-    D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
+    G = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
+    D = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
     G_ema = copy.deepcopy(G).eval()
 
     # Resume from existing pickle.
     if (resume_pkl is not None) and (rank == 0):
         print(f'Resuming from "{resume_pkl}"')
-        with dnnlib.util.open_url(resume_pkl) as f:
+        with stylegan2_ada_pytorch.dnnlib.util.open_url(resume_pkl) as f:
             resume_data = legacy.load_network_pkl(f)
         for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
             misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
@@ -172,7 +172,7 @@ def training_loop(
     augment_pipe = None
     ada_stats = None
     if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
-        augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
+        augment_pipe = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
         augment_pipe.p.copy_(torch.as_tensor(augment_p))
         if ada_target is not None:
             ada_stats = training_stats.Collector(regex='Loss/signs/real')
@@ -192,20 +192,20 @@ def training_loop(
     # Setup training phases.
     if rank == 0:
         print('Setting up training phases...')
-    loss = dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs) # subclass of training.loss.Loss
+    loss = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs) # subclass of training.loss.Loss
     phases = []
     for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
         if reg_interval is None:
-            opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
-            phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]
+            opt = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
+            phases += [dnnlib.EasyDict(name=name + 'both', module=module, opt=opt, interval=1)]
         else: # Lazy regularization.
             mb_ratio = reg_interval / (reg_interval + 1)
             opt_kwargs = dnnlib.EasyDict(opt_kwargs)
             opt_kwargs.lr = opt_kwargs.lr * mb_ratio
             opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
-            opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
-            phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]
-            phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]
+            opt = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
+            phases += [dnnlib.EasyDict(name=name + 'main', module=module, opt=opt, interval=1)]
+            phases += [dnnlib.EasyDict(name=name + 'reg', module=module, opt=opt, interval=reg_interval)]
     for phase in phases:
         phase.start_event = None
         phase.end_event = None
@@ -324,12 +324,12 @@ def training_loop(
         fields = []
         fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
         fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
-        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
+        fields += [f"time {stylegan2_ada_pytorch.dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
         fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
         fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
         fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
-        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
-        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
+        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2 ** 30):<6.2f}"]
+        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2 ** 30):<6.2f}"]
         torch.cuda.reset_peak_memory_stats()
         fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
         training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
@@ -372,7 +372,7 @@ def training_loop(
                 print('Evaluating metrics...')
             for metric in metrics:
                 result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
-                    dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
+                                                      dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
                 if rank == 0:
                     metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
                 stats_metrics.update(result_dict.results)

From 28b848daf6ba18feec3acf749cc687352c6e8507 Mon Sep 17 00:00:00 2001
From: Yulong Lin <linyulong97@gmail.com>
Date: Tue, 7 Sep 2021 15:53:24 +0100
Subject: [PATCH 2/3] add pip install instructions to README

---
 README.md | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/README.md b/README.md
index 08145b1ad..ca4d616c0 100755
--- a/README.md
+++ b/README.md
@@ -1,3 +1,24 @@
+## StyleGAN2-ADA &mdash; `pip install` version of Official PyTorch implementation
+
+I have modified the official PyTorch implementation so that you can `pip install` this repository as a dependency and reuse the classes and functions here.
+
+### Requirements
+
+* Linux and Windows are supported, but we recommend Linux for performance and compatibility reasons.
+* 1&ndash;8 high-end NVIDIA GPUs with at least 12 GB of memory. We have done all testing and development using NVIDIA DGX-1 with 8 Tesla V100 GPUs.
+* 64-bit Python 3.7 and PyTorch 1.7.1. See [https://pytorch.org/](https://pytorch.org/) for PyTorch install instructions.
+* CUDA toolkit 11.0 or later.  Use at least version 11.1 if running on RTX 3090.  (Why is a separate CUDA toolkit installation required?  See comments in [#2](https://github.com/NVlabs/stylegan2-ada-pytorch/issues/2#issuecomment-779457121).)
+
+### Installation
+
+From the repo's root directory `stylegan2-ada-pytorch`, run `python -m pip install .`
+
+### Original official implementation
+
+The original implementation is available [here](https://github.com/NVlabs/stylegan2-ada-pytorch); its `README.md` is copied below.
+
+***
+
 ## StyleGAN2-ADA &mdash; Official PyTorch implementation
 
 ![Teaser image](./docs/stylegan2-ada-teaser-1024x252.png)

From 90db9fe64673323d8519e53486f36ff610dc0605 Mon Sep 17 00:00:00 2001
From: Yulong Lin <linyulong97@gmail.com>
Date: Wed, 15 Sep 2021 20:24:59 +0100
Subject: [PATCH 3/3] Fix dnnlib module imports

---
 stylegan2_ada_pytorch/calc_metrics.py          |  8 ++++----
 stylegan2_ada_pytorch/generate.py              |  2 +-
 stylegan2_ada_pytorch/legacy.py                |  2 +-
 stylegan2_ada_pytorch/metrics/metric_main.py   |  2 +-
 stylegan2_ada_pytorch/metrics/metric_utils.py  |  8 ++++----
 .../metrics/perceptual_path_length.py          |  2 +-
 stylegan2_ada_pytorch/projector.py             |  4 ++--
 stylegan2_ada_pytorch/style_mixing.py          |  2 +-
 .../torch_utils/ops/bias_act.py                |  2 +-
 stylegan2_ada_pytorch/train.py                 |  6 +++---
 .../training/training_loop.py                  | 18 +++++++++---------
 11 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/stylegan2_ada_pytorch/calc_metrics.py b/stylegan2_ada_pytorch/calc_metrics.py
index e7300c057..b56ee8138 100644
--- a/stylegan2_ada_pytorch/calc_metrics.py
+++ b/stylegan2_ada_pytorch/calc_metrics.py
@@ -24,7 +24,7 @@
 #----------------------------------------------------------------------------
 
 def subprocess_fn(rank, args, temp_dir):
-    stylegan2_ada_pytorch.dnnlib.util.Logger(should_flush=True)
+    dnnlib.util.Logger(should_flush=True)
 
     # Init torch.distributed.
     if args.num_gpus > 1:
@@ -126,7 +126,7 @@ def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose):
         ppl_zend     Perceptual path length in Z at path endpoints against cropped image.
         ppl_wend     Perceptual path length in W at path endpoints against cropped image.
     """
-    stylegan2_ada_pytorch.dnnlib.util.Logger(should_flush=True)
+    dnnlib.util.Logger(should_flush=True)
 
     # Validate arguments.
     args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose)
@@ -136,11 +136,11 @@ def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose):
         ctx.fail('--gpus must be at least 1')
 
     # Load network.
-    if not stylegan2_ada_pytorch.dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
+    if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
         ctx.fail('--network must point to a file or URL')
     if args.verbose:
         print(f'Loading network from "{network_pkl}"...')
-    with stylegan2_ada_pytorch.dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
+    with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
         network_dict = legacy.load_network_pkl(f)
         args.G = network_dict['G_ema'] # subclass of torch.nn.Module
 
diff --git a/stylegan2_ada_pytorch/generate.py b/stylegan2_ada_pytorch/generate.py
index 7341b6379..d992544a2 100644
--- a/stylegan2_ada_pytorch/generate.py
+++ b/stylegan2_ada_pytorch/generate.py
@@ -80,7 +80,7 @@ def generate_images(
 
     print('Loading networks from "%s"...' % network_pkl)
     device = torch.device('cuda')
-    with stylegan2_ada_pytorch.dnnlib.util.open_url(network_pkl) as f:
+    with dnnlib.util.open_url(network_pkl) as f:
         G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
 
     os.makedirs(outdir, exist_ok=True)
diff --git a/stylegan2_ada_pytorch/legacy.py b/stylegan2_ada_pytorch/legacy.py
index 7914efccd..4bf64784a 100644
--- a/stylegan2_ada_pytorch/legacy.py
+++ b/stylegan2_ada_pytorch/legacy.py
@@ -306,7 +306,7 @@ def convert_network_pickle(source, dest, force_fp16):
         --dest=stylegan2-cat-config-f.pkl
     """
     print(f'Loading "{source}"...')
-    with stylegan2_ada_pytorch.dnnlib.util.open_url(source) as f:
+    with dnnlib.util.open_url(source) as f:
         data = load_network_pkl(f, force_fp16=force_fp16)
     print(f'Saving "{dest}"...')
     with open(dest, 'wb') as f:
diff --git a/stylegan2_ada_pytorch/metrics/metric_main.py b/stylegan2_ada_pytorch/metrics/metric_main.py
index d81beba02..8c11c3208 100644
--- a/stylegan2_ada_pytorch/metrics/metric_main.py
+++ b/stylegan2_ada_pytorch/metrics/metric_main.py
@@ -58,7 +58,7 @@ def calc_metric(metric, **kwargs): # See metric_utils.MetricOptions for the full
         results         = dnnlib.EasyDict(results),
         metric          = metric,
         total_time      = total_time,
-        total_time_str  = stylegan2_ada_pytorch.dnnlib.util.format_time(total_time),
+        total_time_str  = dnnlib.util.format_time(total_time),
         num_gpus        = opts.num_gpus,
     )
 
diff --git a/stylegan2_ada_pytorch/metrics/metric_utils.py b/stylegan2_ada_pytorch/metrics/metric_utils.py
index 70f1588d3..1f6f72933 100644
--- a/stylegan2_ada_pytorch/metrics/metric_utils.py
+++ b/stylegan2_ada_pytorch/metrics/metric_utils.py
@@ -45,7 +45,7 @@ def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, ve
         is_leader = (rank == 0)
         if not is_leader and num_gpus > 1:
             torch.distributed.barrier() # leader goes first
-        with stylegan2_ada_pytorch.dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
+        with dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
             _feature_detector_cache[key] = torch.jit.load(f).eval().to(device)
         if is_leader and num_gpus > 1:
             torch.distributed.barrier() # others follow
@@ -157,7 +157,7 @@ def update(self, cur_items):
         total_time = cur_time - self.start_time
         time_per_item = (cur_time - self.batch_time) / max(cur_items - self.batch_items, 1)
         if (self.verbose) and (self.tag is not None):
-            print(f'{self.tag:<19s} items {cur_items:<7d} time {stylegan2_ada_pytorch.dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item * 1e3:.2f}')
+            print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item * 1e3:.2f}')
         self.batch_time = cur_time
         self.batch_items = cur_items
 
@@ -179,7 +179,7 @@ def sub(self, tag=None, num_items=None, flush_interval=1000, rel_lo=0, rel_hi=1)
 #----------------------------------------------------------------------------
 
 def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, **stats_kwargs):
-    dataset = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
+    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
     if data_loader_kwargs is None:
         data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
 
@@ -237,7 +237,7 @@ def compute_feature_stats_for_generator(opts, detector_url, detector_kwargs, rel
 
     # Setup generator and load labels.
     G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
-    dataset = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
+    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
 
     # Image generation func.
     def run_generator(z, c):
diff --git a/stylegan2_ada_pytorch/metrics/perceptual_path_length.py b/stylegan2_ada_pytorch/metrics/perceptual_path_length.py
index b14223fed..0b1131900 100644
--- a/stylegan2_ada_pytorch/metrics/perceptual_path_length.py
+++ b/stylegan2_ada_pytorch/metrics/perceptual_path_length.py
@@ -93,7 +93,7 @@ def forward(self, c):
 #----------------------------------------------------------------------------
 
 def compute_ppl(opts, num_samples, epsilon, space, sampling, crop, batch_size, jit=False):
-    dataset = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
+    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
     vgg16_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
     vgg16 = metric_utils.get_feature_detector(vgg16_url, num_gpus=opts.num_gpus, rank=opts.rank, verbose=opts.progress.verbose)
 
diff --git a/stylegan2_ada_pytorch/projector.py b/stylegan2_ada_pytorch/projector.py
index 948cdf450..5718dc7f2 100644
--- a/stylegan2_ada_pytorch/projector.py
+++ b/stylegan2_ada_pytorch/projector.py
@@ -58,7 +58,7 @@ def logprint(*args):
 
     # Load VGG16 feature detector.
     url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
-    with stylegan2_ada_pytorch.dnnlib.util.open_url(url) as f:
+    with dnnlib.util.open_url(url) as f:
         vgg16 = torch.jit.load(f).eval().to(device)
 
     # Features for target image.
@@ -161,7 +161,7 @@ def run_projection(
     # Load networks.
     print('Loading networks from "%s"...' % network_pkl)
     device = torch.device('cuda')
-    with stylegan2_ada_pytorch.dnnlib.util.open_url(network_pkl) as fp:
+    with dnnlib.util.open_url(network_pkl) as fp:
         G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore
 
     # Load target image.
diff --git a/stylegan2_ada_pytorch/style_mixing.py b/stylegan2_ada_pytorch/style_mixing.py
index 1a2612137..507dd2456 100644
--- a/stylegan2_ada_pytorch/style_mixing.py
+++ b/stylegan2_ada_pytorch/style_mixing.py
@@ -61,7 +61,7 @@ def generate_style_mix(
     """
     print('Loading networks from "%s"...' % network_pkl)
     device = torch.device('cuda')
-    with stylegan2_ada_pytorch.dnnlib.util.open_url(network_pkl) as f:
+    with dnnlib.util.open_url(network_pkl) as f:
         G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
 
     os.makedirs(outdir, exist_ok=True)
diff --git a/stylegan2_ada_pytorch/torch_utils/ops/bias_act.py b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.py
index 4635d3eb1..9655f32ba 100644
--- a/stylegan2_ada_pytorch/torch_utils/ops/bias_act.py
+++ b/stylegan2_ada_pytorch/torch_utils/ops/bias_act.py
@@ -12,7 +12,7 @@
 import warnings
 import numpy as np
 import torch
-from ... import dnnlib
+from stylegan2_ada_pytorch import dnnlib
 import traceback
 
 from .. import custom_ops
diff --git a/stylegan2_ada_pytorch/train.py b/stylegan2_ada_pytorch/train.py
index c2e61c9eb..e32b5e08a 100644
--- a/stylegan2_ada_pytorch/train.py
+++ b/stylegan2_ada_pytorch/train.py
@@ -108,7 +108,7 @@ def setup_training_loop_kwargs(
     args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False)
     args.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=3, prefetch_factor=2)
     try:
-        training_set = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset
+        training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset
         args.training_set_kwargs.resolution = training_set.resolution # be explicit about resolution
         args.training_set_kwargs.use_labels = training_set.has_labels # be explicit about labels
         args.training_set_kwargs.max_size = len(training_set) # be explicit about dataset size
@@ -362,7 +362,7 @@ def setup_training_loop_kwargs(
 #----------------------------------------------------------------------------
 
 def subprocess_fn(rank, args, temp_dir):
-    stylegan2_ada_pytorch.dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)
+    dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)
 
     # Init torch.distributed.
     if args.num_gpus > 1:
@@ -480,7 +480,7 @@ def main(ctx, outdir, dry_run, **config_kwargs):
       lsundog256     LSUN Dog trained at 256x256 resolution.
       <PATH or URL>  Custom network pickle.
     """
-    stylegan2_ada_pytorch.dnnlib.util.Logger(should_flush=True)
+    dnnlib.util.Logger(should_flush=True)
 
     # Setup training options.
     try:
diff --git a/stylegan2_ada_pytorch/training/training_loop.py b/stylegan2_ada_pytorch/training/training_loop.py
index f32a8819e..8b6457651 100644
--- a/stylegan2_ada_pytorch/training/training_loop.py
+++ b/stylegan2_ada_pytorch/training/training_loop.py
@@ -133,7 +133,7 @@ def training_loop(
     # Load training set.
     if rank == 0:
         print('Loading training set...')
-    training_set = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
+    training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
     training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
     training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
     if rank == 0:
@@ -147,14 +147,14 @@ def training_loop(
     if rank == 0:
         print('Constructing networks...')
     common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)
-    G = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
-    D = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
+    G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
+    D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
     G_ema = copy.deepcopy(G).eval()
 
     # Resume from existing pickle.
     if (resume_pkl is not None) and (rank == 0):
         print(f'Resuming from "{resume_pkl}"')
-        with stylegan2_ada_pytorch.dnnlib.util.open_url(resume_pkl) as f:
+        with dnnlib.util.open_url(resume_pkl) as f:
             resume_data = legacy.load_network_pkl(f)
         for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
             misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
@@ -172,7 +172,7 @@ def training_loop(
     augment_pipe = None
     ada_stats = None
     if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
-        augment_pipe = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
+        augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
         augment_pipe.p.copy_(torch.as_tensor(augment_p))
         if ada_target is not None:
             ada_stats = training_stats.Collector(regex='Loss/signs/real')
@@ -192,18 +192,18 @@ def training_loop(
     # Setup training phases.
     if rank == 0:
         print('Setting up training phases...')
-    loss = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs) # subclass of training.loss.Loss
+    loss = dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs) # subclass of training.loss.Loss
     phases = []
     for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
         if reg_interval is None:
-            opt = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
+            opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
             phases += [dnnlib.EasyDict(name=name + 'both', module=module, opt=opt, interval=1)]
         else: # Lazy regularization.
             mb_ratio = reg_interval / (reg_interval + 1)
             opt_kwargs = dnnlib.EasyDict(opt_kwargs)
             opt_kwargs.lr = opt_kwargs.lr * mb_ratio
             opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
-            opt = stylegan2_ada_pytorch.dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
+            opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
             phases += [dnnlib.EasyDict(name=name + 'main', module=module, opt=opt, interval=1)]
             phases += [dnnlib.EasyDict(name=name + 'reg', module=module, opt=opt, interval=reg_interval)]
     for phase in phases:
@@ -324,7 +324,7 @@ def training_loop(
         fields = []
         fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
         fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
-        fields += [f"time {stylegan2_ada_pytorch.dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
+        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
         fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
         fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
         fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]