diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 713c1e6c421..00000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,3092 +0,0 @@ -version: 2.1 - -# How to test the Linux jobs: -# - Install CircleCI local CLI: https://circleci.com/docs/2.0/local-cli/ -# - circleci config process .circleci/config.yml > gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel_py3.7 -# - Replace binary_linux_wheel_py3.7 with the name of the job you want to test. -# Job names are 'name:' key. - -executors: - windows-cpu: - machine: - resource_class: windows.xlarge - image: windows-server-2019-vs2019:stable - shell: bash.exe - - windows-gpu: - machine: - resource_class: windows.gpu.nvidia.medium - image: windows-server-2019-nvidia:stable - shell: bash.exe - -commands: - checkout_merge: - description: "checkout merge branch" - steps: - - checkout -# - run: -# name: Checkout merge branch -# command: | -# set -ex -# BRANCH=$(git rev-parse --abbrev-ref HEAD) -# if [[ "$BRANCH" != "main" ]]; then -# git fetch --force origin ${CIRCLE_BRANCH}/merge:merged/${CIRCLE_BRANCH} -# git checkout "merged/$CIRCLE_BRANCH" -# fi - designate_upload_channel: - description: "inserts the correct upload channel into ${BASH_ENV}" - steps: - - run: - name: adding UPLOAD_CHANNEL to BASH_ENV - command: | - our_upload_channel=nightly - # On tags upload to test instead - if [[ -n "${CIRCLE_TAG}" ]]; then - our_upload_channel=test - fi - echo "export UPLOAD_CHANNEL=${our_upload_channel}" >> ${BASH_ENV} - - brew_update: - description: "Update Homebrew and install base formulae" - steps: - - run: - name: Update Homebrew - no_output_timeout: "10m" - command: | - set -ex - - # Update repositories manually. - # Running `brew update` produces a comparison between the - # current checkout and the updated checkout, which takes a - # very long time because the existing checkout is 2y old. - for path in $(find /usr/local/Homebrew -type d -name .git) - do - cd $path/.. - git fetch --depth=1 origin - git reset --hard origin/master - done - - export HOMEBREW_NO_AUTO_UPDATE=1 - - # Install expect and moreutils so that we can call `unbuffer` and `ts`. - # moreutils installs a `parallel` executable by default, which conflicts - # with the executable from the GNU `parallel`, so we must unlink GNU - # `parallel` first, and relink it afterwards. 
- brew install coreutils - brew unlink parallel - brew install moreutils - brew link parallel --overwrite - brew install expect - - brew_install: - description: "Install Homebrew formulae" - parameters: - formulae: - type: string - default: "" - steps: - - run: - name: Install << parameters.formulae >> - no_output_timeout: "10m" - command: | - set -ex - export HOMEBREW_NO_AUTO_UPDATE=1 - brew install << parameters.formulae >> - - run_brew_for_ios_build: - steps: - - brew_update - - brew_install: - formulae: libtool - - apt_install: - parameters: - args: - type: string - descr: - type: string - default: "" - update: - type: boolean - default: true - steps: - - run: - name: > - <<^ parameters.descr >> apt install << parameters.args >> <> - <<# parameters.descr >> << parameters.descr >> <> - command: | - <<# parameters.update >> sudo apt update -qy <> - sudo apt install << parameters.args >> - - pip_install: - parameters: - args: - type: string - descr: - type: string - default: "" - user: - type: boolean - default: true - steps: - - run: - name: > - <<^ parameters.descr >> pip install << parameters.args >> <> - <<# parameters.descr >> << parameters.descr >> <> - command: > - pip install - <<# parameters.user >> --user <> - --progress-bar=off - << parameters.args >> - - install_torchvision: - parameters: - editable: - type: boolean - default: true - steps: - - pip_install: - args: --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu - descr: Install PyTorch from nightly releases - - pip_install: - args: --no-build-isolation <<# parameters.editable >> --editable <> . - descr: Install torchvision <<# parameters.editable >> in editable mode <> - - install_prototype_dependencies: - steps: - - pip_install: - args: iopath - descr: Install third-party dependencies - - pip_install: - args: --pre torchdata --extra-index-url https://download.pytorch.org/whl/nightly/cpu - descr: Install torchdata from nightly releases - - # Most of the test suite is handled by the `unittest` jobs, with completely different workflow and setup. - # This command can be used if only a selection of tests need to be run, for ad-hoc files. 
- run_tests_selective: - parameters: - file_or_dir: - type: string - steps: - - run: - name: Install test utilities - command: pip install --progress-bar=off pytest pytest-mock - - run: - name: Run tests - command: pytest --junitxml=test-results/junit.xml -v --durations 20 <> - - store_test_results: - path: test-results - - download_model_weights: - parameters: - extract_roots: - type: string - default: "torchvision/models" - background: - type: boolean - default: true - steps: - - apt_install: - args: parallel wget - descr: Install download utilitites - - run: - name: Download model weights - background: << parameters.background >> - command: | - mkdir -p ~/.cache/torch/hub/checkpoints - python scripts/collect_model_urls.py << parameters.extract_roots >> \ - | parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci' - -binary_common: &binary_common - parameters: - # Edit these defaults to do a release - build_version: - description: "version number of release binary; by default, build a nightly" - type: string - default: "" - pytorch_version: - description: "PyTorch version to build against; by default, use a nightly" - type: string - default: "" - # Don't edit these - python_version: - description: "Python version to build against (e.g., 3.7)" - type: string - cu_version: - description: "CUDA version to build against, in CU format (e.g., cpu or cu100)" - type: string - default: "cpu" - unicode_abi: - description: "Python 2.7 wheel only: whether or not we are cp27mu (default: no)" - type: string - default: "" - wheel_docker_image: - description: "Wheel only: what docker image to use" - type: string - default: "" - conda_docker_image: - description: "Conda only: what docker image to use" - type: string - default: "pytorch/conda-builder:cpu" - environment: - PYTHON_VERSION: << parameters.python_version >> - PYTORCH_VERSION: << parameters.pytorch_version >> - UNICODE_ABI: << parameters.unicode_abi >> - CU_VERSION: << parameters.cu_version >> - MACOSX_DEPLOYMENT_TARGET: 10.9 - -torchvision_ios_params: &torchvision_ios_params - parameters: - build_environment: - type: string - default: "" - ios_arch: - type: string - default: "" - ios_platform: - type: string - default: "" - environment: - BUILD_ENVIRONMENT: << parameters.build_environment >> - IOS_ARCH: << parameters.ios_arch >> - IOS_PLATFORM: << parameters.ios_platform >> - -torchvision_android_params: &torchvision_android_params - parameters: - build_environment: - type: string - default: "" - environment: - BUILD_ENVIRONMENT: << parameters.build_environment >> - -smoke_test_common: &smoke_test_common - <<: *binary_common - docker: - - image: torchvision/smoke_test:latest - -jobs: - circleci_consistency: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - pip_install: - args: jinja2 pyyaml - - run: - name: Check CircleCI config consistency - command: | - python .circleci/regenerate.py - git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! 
Run .circleci/regenerate.py to update config"; exit 1) - - lint_python_and_config: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - pip_install: - args: pre-commit - descr: Install lint utilities - - run: - name: Install pre-commit hooks - command: pre-commit install-hooks - - run: - name: Lint Python code and config files - command: pre-commit run --all-files - - run: - name: Required lint modifications - when: on_fail - command: git --no-pager diff - - lint_c: - docker: - - image: cimg/python:3.7 - steps: - - apt_install: - args: libtinfo5 - descr: Install additional system libraries - - checkout - - run: - name: Install lint utilities - command: | - curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format - chmod +x clang-format - sudo mv clang-format /opt/clang-format - - run: - name: Lint C code - command: ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format - - run: - name: Required lint modifications - when: on_fail - command: git --no-pager diff - - type_check_python: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - install_torchvision: - editable: true - - install_prototype_dependencies - - pip_install: - args: mypy - descr: Install Python type check utilities - - run: - name: Check Python types statically - command: mypy --install-types --non-interactive --config-file mypy.ini - - unittest_torchhub: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - install_torchvision - - run_tests_selective: - file_or_dir: test/test_hub.py - - unittest_onnx: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - install_torchvision - - pip_install: - args: onnx onnxruntime - descr: Install ONNX - - run_tests_selective: - file_or_dir: test/test_onnx.py - - unittest_extended: - docker: - - image: cimg/python:3.7 - resource_class: xlarge - steps: - - checkout - - download_model_weights - - install_torchvision - - run: - name: Enable extended tests - command: echo 'export PYTORCH_TEST_WITH_EXTENDED=1' >> $BASH_ENV - - run_tests_selective: - file_or_dir: test/test_extended_*.py - - binary_linux_wheel: - <<: *binary_common - docker: - - image: << parameters.wheel_docker_image >> - resource_class: 2xlarge+ - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - packaging/build_wheel.sh - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_linux_conda: - <<: *binary_common - docker: - - image: "<< parameters.conda_docker_image >>" - resource_class: 2xlarge+ - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - packaging/build_conda.sh - - store_artifacts: - path: /opt/conda/conda-bld/linux-64 - - persist_to_workspace: - root: /opt/conda/conda-bld/linux-64 - paths: - - "*" - - store_test_results: - path: build_results/ - - binary_win_conda: - <<: *binary_common - executor: windows-cpu - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/windows/internal/cuda_install.bat - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate base - conda install -yq conda-build "conda-package-handling!=1.5.0" - packaging/build_conda.sh - rm 
/C/tools/miniconda3/conda-bld/win-64/vs${VC_YEAR}*.tar.bz2 - - store_artifacts: - path: C:/tools/miniconda3/conda-bld/win-64 - - persist_to_workspace: - root: C:/tools/miniconda3/conda-bld/win-64 - paths: - - "*" - - store_test_results: - path: build_results/ - - binary_win_wheel: - <<: *binary_common - executor: windows-cpu - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build wheel packages - no_output_timeout: 30m - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/windows/internal/cuda_install.bat - packaging/build_wheel.sh - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - store_test_results: - path: build_results/ - - binary_macos_wheel: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout_merge - - designate_upload_channel - - run: - # Cannot easily deduplicate this as source'ing activate - # will set environment variables which we need to propagate - # to build_wheel.sh - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - packaging/build_wheel.sh - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_ios_build: - <<: *torchvision_ios_params - macos: - xcode: "14.0" - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run_brew_for_ios_build - - run: - name: Build - no_output_timeout: "1h" - command: | - script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_build.sh" - cat "$script" - source "$script" - - persist_to_workspace: - root: /Users/distiller/workspace/ - paths: ios - - binary_ios_upload: - <<: *torchvision_ios_params - macos: - xcode: "14.0" - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run_brew_for_ios_build - - run: - name: Upload - no_output_timeout: "1h" - command: | - script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_upload.sh" - cat "$script" - source "$script" - - binary_android_build: - <<: *torchvision_android_params - docker: - - image: cimg/android:2021.08-ndk - resource_class: xlarge - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run: - name: Build - no_output_timeout: "1h" - command: | - script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_build.sh" - cat "$script" - source "$script" - - store_artifacts: - path: ~/workspace/artifacts - - binary_android_upload: - <<: *torchvision_android_params - docker: - - image: cimg/android:2021.08-ndk - resource_class: xlarge - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run: - name: Upload - no_output_timeout: "1h" - command: | - script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_upload.sh" - cat "$script" - source "$script" - - binary_macos_conda: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout_merge - - designate_upload_channel - - run: - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - conda install -yq conda-build - packaging/build_conda.sh - - store_artifacts: - path: /Users/distiller/miniconda3/conda-bld/osx-64 - - persist_to_workspace: - root: /Users/distiller/miniconda3/conda-bld/osx-64 - paths: - - "*" - - store_test_results: - path: build_results/ - - # Requires org-member context - binary_conda_upload: - docker: - - image: 
continuumio/miniconda - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - command: | - # Prevent credential from leaking - conda install -yq anaconda-client - set -x - anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload ~/workspace/*.tar.bz2 -u "pytorch-${UPLOAD_CHANNEL}" --label main --no-progress --force - - # Requires org-member context - binary_wheel_upload: - parameters: - subfolder: - description: "What whl subfolder to upload to, e.g., blank or cu100/ (trailing slash is important)" - type: string - docker: - - image: cimg/python:3.7 - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - checkout - - pip_install: - args: awscli - - run: - command: | - export PATH="$HOME/.local/bin:$PATH" - # Prevent credential from leaking - set +x - export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}" - export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}" - set -x - for pkg in ~/workspace/*.whl; do - aws s3 cp "$pkg" "s3://pytorch/whl/${UPLOAD_CHANNEL}/<< parameters.subfolder >>" --acl public-read - done - - smoke_test_linux_conda: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - conda install -v -y -c pytorch-nightly pytorch - conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - smoke_test_linux_pip: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - - pip_install: - args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - smoke_test_docker_image_build: - machine: - image: ubuntu-2004:202104-01 - resource_class: large - environment: - image_name: torchvision/smoke_test - steps: - - checkout - - designate_upload_channel - - run: - name: Build and push Docker image - no_output_timeout: "1h" - command: | - set +x - echo "${DOCKER_HUB_TOKEN}" | docker login --username "${DOCKER_HUB_USERNAME}" --password-stdin - set -x - cd .circleci/smoke_test/docker && docker build . 
-t ${image_name}:${CIRCLE_WORKFLOW_ID} - docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} ${image_name}:latest - docker push ${image_name}:${CIRCLE_WORKFLOW_ID} - docker push ${image_name}:latest - - smoke_test_win_conda: - <<: *binary_common - executor: - name: windows-cpu - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda env remove -n python${PYTHON_VERSION} || true - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - conda install -v -y -c pytorch-nightly pytorch - conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - smoke_test_win_pip: - <<: *binary_common - executor: - name: windows-cpu - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - - pip_install: - args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - unittest_linux_cpu: - <<: *binary_common - docker: - - image: "pytorch/manylinux-cpu" - resource_class: 2xlarge+ - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. - command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - - keys: - - env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - save_cache: - - key: env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - paths: - - conda - - env - - run: - name: Install torchvision - command: .circleci/unittest/linux/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/linux/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/linux/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_linux_gpu: - <<: *binary_common - machine: - image: ubuntu-2004-cuda-11.4:202110-01 - resource_class: gpu.nvidia.medium - environment: - image_name: "pytorch/manylinux-cuda116" - CU_VERSION: << parameters.cu_version >> - PYTHON_VERSION: << parameters.python_version >> - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. 
- command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - - keys: - - env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - - run: - name: Setup - command: docker run -e PYTHON_VERSION -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh - - save_cache: - - key: env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - paths: - - conda - - env - - run: - # Here we create an envlist file that contains some env variables that we want the docker container to be aware of. - # Normally, the CIRCLECI variable is set and available on all CI workflows: https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables. - # They're avaiable in all the other workflows (OSX and Windows). - # But here, we're running the unittest_linux_gpu workflows in a docker container, where those variables aren't accessible. - # So instead we dump the variables we need in env.list and we pass that file when invoking "docker run". - name: export CIRCLECI env var - command: echo "CIRCLECI=true" >> ./env.list - - run: - name: Install torchvision - command: docker run -t --gpus all -v $PWD:$PWD -w $PWD -e UPLOAD_CHANNEL -e CU_VERSION "${image_name}" .circleci/unittest/linux/scripts/install.sh - - run: - name: Run tests - command: docker run --env-file ./env.list -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh - - run: - name: Post Process - command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_windows_cpu: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. - command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - - keys: - - env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - - run: - name: Setup - command: .circleci/unittest/windows/scripts/setup_env.sh - - save_cache: - - key: env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - paths: - - conda - - env - - run: - name: Install torchvision - command: .circleci/unittest/windows/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/windows/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/windows/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_windows_gpu: - <<: *binary_common - executor: - name: windows-gpu - environment: - CUDA_VERSION: "11.6" - PYTHON_VERSION: << parameters.python_version >> - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. 
- command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - - keys: - - env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - - run: - name: Setup - command: .circleci/unittest/windows/scripts/setup_env.sh - - save_cache: - - key: env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - paths: - - conda - - env - - run: - name: Install CUDA - command: packaging/windows/internal/cuda_install.bat - - run: - name: Update CUDA driver - command: packaging/windows/internal/driver_update.bat - - run: - name: Install torchvision - command: .circleci/unittest/windows/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/windows/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/windows/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_macos_cpu: - <<: *binary_common - macos: - xcode: "14.0" - resource_class: large - steps: - - checkout - - designate_upload_channel - - run: - name: Install wget - command: HOMEBREW_NO_AUTO_UPDATE=1 brew install wget - # Disable brew auto update which is very slow - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. - command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - - keys: - - env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - save_cache: - - key: env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - paths: - - conda - - env - - run: - name: Install torchvision - command: .circleci/unittest/linux/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/linux/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/linux/scripts/post_process.sh - - store_test_results: - path: test-results - - cmake_linux_cpu: - <<: *binary_common - docker: - - image: "pytorch/manylinux-cpu" - resource_class: 2xlarge+ - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Setup conda - command: .circleci/unittest/linux/scripts/setup_env.sh - - run: packaging/build_cmake.sh - - cmake_linux_gpu: - <<: *binary_common - machine: - image: ubuntu-2004-cuda-11.4:202110-01 - resource_class: gpu.nvidia.small - environment: - PYTHON_VERSION: << parameters.python_version >> - PYTORCH_VERSION: << parameters.pytorch_version >> - UNICODE_ABI: << parameters.unicode_abi >> - CU_VERSION: << parameters.cu_version >> - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Setup conda - command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Build torchvision C++ distribution and test - no_output_timeout: 30m - command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -e UPLOAD_CHANNEL -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> packaging/build_cmake.sh - - cmake_macos_cpu: - <<: *binary_common - macos: - xcode: "14.0" 
- steps: - - checkout_merge - - designate_upload_channel - - run: - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - conda install -yq conda-build cmake - packaging/build_cmake.sh - - cmake_windows_cpu: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout_merge - - designate_upload_channel - - run: - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/build_cmake.sh - - cmake_windows_gpu: - <<: *binary_common - executor: - name: windows-gpu - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Update CUDA driver - command: packaging/windows/internal/driver_update.bat - - run: - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/windows/internal/cuda_install.bat - packaging/build_cmake.sh - - build_docs: - <<: *binary_common - docker: - - image: cimg/python:3.7 - resource_class: 2xlarge+ - steps: - - attach_workspace: - at: ~/workspace - - checkout - - download_model_weights - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - designate_upload_channel - - run: - name: Install torchvision - command: .circleci/unittest/linux/scripts/install.sh - - run: - name: Build docs - command: | - set -ex - # turn v1.12.0rc3 into 1.12.0 - tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9.]*\).*/\1/') - VERSION=${tag:-main} - eval "$(./conda/bin/conda shell.bash hook)" - conda activate ./env - pushd docs - pip install --progress-bar=off -r requirements.txt - make html - popd - - persist_to_workspace: - root: ./ - paths: - - "*" - - store_artifacts: - path: ./docs/build/html - destination: docs - - upload_docs: - <<: *binary_common - docker: - - image: "pytorch/manylinux-cuda100" - resource_class: 2xlarge+ - steps: - - attach_workspace: - at: ~/workspace - - run: - name: Generate netrc - command: | - # set credentials for https pushing - # requires the org-member context - cat > ~/.netrc \<> "$GITHUB_ENV" - fi - - name: Set Release CHANNEL (for release) - if: ${{ (github.event_name == 'pull_request' && startsWith(github.base_ref, 'release')) || startsWith(github.ref, 'refs/heads/release') }} - run: | - echo "CHANNEL=test" >> "$GITHUB_ENV" - - name: Setup miniconda - uses: pytorch/test-infra/.github/actions/setup-miniconda@main - - name: Build TorchVision M1 wheel - shell: arch -arch arm64 bash {0} - env: - ENV_NAME: conda-env-${{ github.run_id }} - PY_VERS: ${{ matrix.py_vers }} - run: | - # Needed for JPEG library detection as setup.py detects conda presence by running `shutil.which('conda')` - set -ex - . 
packaging/pkg_helpers.bash - # if we are uploading to test channell, our version consist only of the base: 0.x.x - no date string or suffix added - if [[ $CHANNEL == "test" ]]; then - setup_base_build_version - else - setup_build_version - fi - - conda create -yp ${ENV_NAME} python=${PY_VERS} numpy libpng jpeg wheel pkg-config - conda run -p ${ENV_NAME} python3 -mpip install torch --pre --extra-index-url=https://download.pytorch.org/whl/${CHANNEL} - conda run -p ${ENV_NAME} python3 -mpip install delocate - conda run -p ${ENV_NAME} python3 setup.py bdist_wheel - export PYTORCH_VERSION="$(conda run -p ${ENV_NAME} python3 -mpip show torch | grep ^Version: | sed 's/Version: *//')" - conda run -p ${ENV_NAME} DYLD_FALLBACK_LIBRARY_PATH="${ENV_NAME}/lib" delocate-wheel -v --ignore-missing-dependencies dist/*.whl - conda env remove -p ${ENV_NAME} - - name: Test wheel - shell: arch -arch arm64 bash {0} - env: - ENV_NAME: conda-test-env-${{ github.run_id }} - PY_VERS: ${{ matrix.py_vers }} - run: | - set -ex - conda create -yp ${ENV_NAME} python=${PY_VERS} numpy - conda run -p ${ENV_NAME} python3 -mpip install torch --pre --extra-index-url=https://download.pytorch.org/whl/${CHANNEL} - conda run -p ${ENV_NAME} python3 -mpip install dist/*.whl - # Test torch is importable, by changing cwd and running import commands - conda run --cwd /tmp -p ${ENV_NAME} python3 -c "import torchvision;print('torchvision version is ', torchvision.__version__)" - conda run --cwd /tmp -p ${ENV_NAME} python3 -c "import torch;import torchvision;print('Is torchvision useable?', all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]))" - conda run --cwd /tmp -p ${ENV_NAME} python3 -c "import torchvision;print(torchvision.io.read_image('${PWD}/gallery/assets/dog1.jpg').shape)" - conda env remove -p ${ENV_NAME} - - name: Upload wheel to GitHub - uses: actions/upload-artifact@v3 - with: - name: torchvision-py${{ matrix.py_vers }}-macos11-m1 - path: dist/ - - name: Upload wheel to S3 - if: ${{ github.event_name == 'push' && (github.event.ref == 'refs/heads/nightly' || startsWith(github.event.ref, 'refs/tags/')) }} - shell: arch -arch arm64 bash {0} - env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY }} - run: | - for pkg in dist/*; do - aws s3 cp "$pkg" "s3://pytorch/whl/${CHANNEL}/cpu/" --acl public-read - done - build_conda: - name: "Build TorchVision M1 conda packages" - runs-on: macos-m1-12 - strategy: - matrix: - py_vers: [ "3.8", "3.9", "3.10" ] - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - name: Set CHANNEL (only for tagged pushes) - if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/') }} - run: | - # reference ends with an RC suffix - if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then - echo "CHANNEL=test" >> "$GITHUB_ENV" - fi - - name: Set CHANNEL Release (for release) - if: ${{ (github.event_name == 'pull_request' && startsWith(github.base_ref, 'release')) || startsWith(github.ref, 'refs/heads/release') }} - run: | - echo "CHANNEL=test" >> "$GITHUB_ENV" - - name: Setup miniconda - uses: pytorch/test-infra/.github/actions/setup-miniconda@main - - name: Install conda-build and purge previous artifacts - shell: arch -arch arm64 bash {0} - run: | - conda install -yq conda-build - conda build purge-all - - name: Build TorchVision M1 conda package - shell: arch -arch arm64 bash {0} - env: - ENV_NAME: conda-env-${{ github.run_id }} - 
PYTHON_VERSION: ${{ matrix.py_vers }} - CU_VERSION: cpu - run: | - set -ex - . packaging/pkg_helpers.bash - - if [[ $CHANNEL == "test" ]]; then - setup_base_build_version - else - setup_build_version - fi - - setup_conda_pytorch_constraint - export SOURCE_ROOT_DIR=$(pwd) - conda build \ - -c defaults \ - $CONDA_CHANNEL_FLAGS \ - --no-anaconda-upload \ - --python "$PYTHON_VERSION" \ - --output-folder=dist/ \ - packaging/torchvision - - name: Upload package to GitHub - uses: actions/upload-artifact@v3 - with: - name: torchvision-py${{ matrix.py_vers }}-macos11-m1-conda - path: dist/ - - name: Upload package to conda - if: ${{ github.event_name == 'push' && (github.event.ref == 'refs/heads/nightly' || startsWith(github.event.ref, 'refs/tags/')) }} - shell: arch -arch arm64 bash {0} - env: - CONDA_PYTORCHBOT_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }} - run: | - conda install -yq anaconda-client - set -x - export ANACONDA_PATH=$(conda info --base)/bin - $ANACONDA_PATH/anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload dist/osx-arm64/*.tar.bz2 -u "pytorch-${CHANNEL}" --label main --no-progress --force diff --git a/.github/workflows/pr-labels.yml b/.github/workflows/pr-labels.yml deleted file mode 100644 index 20c37e4fd88..00000000000 --- a/.github/workflows/pr-labels.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: pr-labels - -on: - push: - branches: - - main - -jobs: - is-properly-labeled: - runs-on: ubuntu-latest - - steps: - - name: Set up python - uses: actions/setup-python@v2 - - - name: Install requests - run: pip install requests - - - name: Checkout repository - uses: actions/checkout@v2 - - - name: Process commit and find merger responsible for labeling - id: commit - run: echo "::set-output name=merger::$(python .github/process_commit.py ${{ github.sha }})" - - - name: Ping merger responsible for labeling if necessary - if: ${{ steps.commit.outputs.merger != '' }} - uses: mshick/add-pr-comment@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - message: | - Hey ${{ steps.commit.outputs.merger }}! - - You merged this PR, but no labels were added. 
The list of valid labels is available at https://github.com/pytorch/vision/blob/main/.github/process_commit.py diff --git a/.github/workflows/prototype-tests.yml b/.github/workflows/prototype-tests.yml index 5e9ca360d08..7ef890e8416 100644 --- a/.github/workflows/prototype-tests.yml +++ b/.github/workflows/prototype-tests.yml @@ -7,13 +7,20 @@ jobs: prototype: strategy: matrix: - os: - - ubuntu-latest - - windows-latest - - macos-latest + runner: + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 fail-fast: false - runs-on: ${{ matrix.os }} + runs-on: ubuntu-latest steps: - name: Set up python @@ -43,41 +50,12 @@ jobs: id: setup run: exit 0 - - name: Run prototype features tests - shell: bash - run: | - pytest \ - --durations=20 \ - --cov=torchvision/prototype/features \ - --cov-report=term-missing \ - test/test_prototype_features*.py - - - name: Run prototype datasets tests - if: success() || ( failure() && steps.setup.conclusion == 'success' ) - shell: bash + - name: Test non-determinism run: | - pytest \ - --durations=20 \ - --cov=torchvision/prototype/datasets \ - --cov-report=term-missing \ - test/test_prototype_datasets*.py + pip install tqdm + python test_gaussian_blur_non_determinism.py - name: Run prototype transforms tests if: success() || ( failure() && steps.setup.conclusion == 'success' ) shell: bash - run: | - pytest \ - --durations=20 \ - --cov=torchvision/prototype/transforms \ - --cov-report=term-missing \ - test/test_prototype_transforms*.py - - - name: Run prototype models tests - if: success() || ( failure() && steps.setup.conclusion == 'success' ) - shell: bash - run: | - pytest \ - --durations=20 \ - --cov=torchvision/prototype/models \ - --cov-report=term-missing \ - test/test_prototype_models*.py + run: pytest -rP test/test_prototype_transforms_functional.py::TestKernels::test_scripted_vs_eager -k gauss diff --git a/.github/workflows/test-m1.yml b/.github/workflows/test-m1.yml deleted file mode 100644 index 1e5f79f82fd..00000000000 --- a/.github/workflows/test-m1.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Unit-tests on M1 -on: - pull_request: - push: - branches: - - nightly - - main - - release/* - workflow_dispatch: -env: - CHANNEL: "nightly" -jobs: - tests: - name: "Unit-tests on M1" - runs-on: macos-m1-12 - strategy: - matrix: - py_vers: [ "3.8"] - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - name: Set Release CHANNEL (for release) - if: ${{ (github.event_name == 'pull_request' && startsWith(github.base_ref, 'release')) || startsWith(github.ref, 'refs/heads/release') }} - run: | - echo "CHANNEL=test" >> "$GITHUB_ENV" - - name: Install TorchVision - shell: arch -arch arm64 bash {0} - env: - ENV_NAME: conda-env-${{ github.run_id }} - PY_VERS: ${{ matrix.py_vers }} - run: | - . ~/miniconda3/etc/profile.d/conda.sh - # Needed for JPEG library detection as setup.py detects conda presence by running `shutil.which('conda')` - export PATH=~/miniconda3/bin:$PATH - set -ex - conda create -yp ${ENV_NAME} python=${PY_VERS} numpy libpng jpeg scipy - conda run -p ${ENV_NAME} python3 -mpip install --pre torch --extra-index-url=https://download.pytorch.org/whl/${CHANNEL} - conda run -p ${ENV_NAME} python3 setup.py develop - conda run -p ${ENV_NAME} python3 -mpip install pytest pytest-mock av - - name: Run tests - shell: arch -arch arm64 bash {0} - env: - ENV_NAME: conda-env-${{ github.run_id }} - PY_VERS: ${{ matrix.py_vers }} - run: | - . 
~/miniconda3/etc/profile.d/conda.sh
-          set -ex
-          conda run -p ${ENV_NAME} --no-capture-output python3 -u -mpytest -v --tb=long --durations 20
-          conda env remove -p ${ENV_NAME}
diff --git a/.github/workflows/tests-schedule.yml b/.github/workflows/tests-schedule.yml
deleted file mode 100644
index ecc283cac27..00000000000
--- a/.github/workflows/tests-schedule.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-name: tests
-
-on:
-  pull_request:
-    paths:
-      - "test/test_datasets_download.py"
-      - ".github/failed_schedule_issue_template.md"
-      - ".github/workflows/tests-schedule.yml"
-
-  schedule:
-    - cron: "0 9 * * *"
-
-jobs:
-  download:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Set up python
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.7
-
-      - name: Upgrade system packages
-        run: python -m pip install --upgrade pip setuptools wheel
-
-      - name: Checkout repository
-        uses: actions/checkout@v2
-
-      - name: Install torch nightly build
-        run: pip install --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
-
-      - name: Install torchvision
-        run: pip install --no-build-isolation --editable .
-
-      - name: Install all optional dataset requirements
-        run: pip install scipy pycocotools lmdb requests
-
-      - name: Install tests requirements
-        run: pip install pytest
-
-      - name: Run tests
-        run: pytest -ra -v test/test_datasets_download.py
-
-      - uses: JasonEtco/create-an-issue@v2.4.0
-        name: Create issue if download tests failed
-        if: failure() && github.event_name == 'schedule'
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          REPO: ${{ github.repository }}
-          WORKFLOW: ${{ github.workflow }}
-          JOB: ${{ github.job }}
-          ID: ${{ github.run_id }}
-        with:
-          filename: .github/failed_schedule_issue_template.md
diff --git a/test/test_prototype_transforms_functional.py b/test/test_prototype_transforms_functional.py
index 982d776bdd0..666176bd13a 100644
--- a/test/test_prototype_transforms_functional.py
+++ b/test/test_prototype_transforms_functional.py
@@ -86,7 +86,7 @@ class TestKernels:
 
     @sample_inputs
     @pytest.mark.parametrize("device", cpu_and_gpu())
-    def test_scripted_vs_eager(self, info, args_kwargs, device):
+    def test_scripted_vs_eager(self, request, info, args_kwargs, device):
         kernel_eager = info.kernel
         kernel_scripted = script(kernel_eager)
 
@@ -95,6 +95,12 @@ def test_scripted_vs_eager(self, info, args_kwargs, device):
         actual = kernel_scripted(*args, **kwargs)
         expected = kernel_eager(*args, **kwargs)
 
+        import pathlib
+
+        artifacts = pathlib.Path(__file__).parent / "artifacts"
+        artifacts.mkdir(exist_ok=True)
+        torch.save((args, kwargs, actual, expected), str(artifacts / f"{request.node.name}.pt"))
+
         assert_close(actual, expected, **info.closeness_kwargs)
 
     def _unbatch(self, batch, *, data_dims):
diff --git a/test_gaussian_blur_non_determinism.py b/test_gaussian_blur_non_determinism.py
new file mode 100644
index 00000000000..ab4891d2586
--- /dev/null
+++ b/test_gaussian_blur_non_determinism.py
@@ -0,0 +1,19 @@
+import torch
+import tqdm
+from torchvision.prototype.transforms import functional as F
+
+
+torch.manual_seed(0)
+video = torch.testing.make_tensor(4, 5, 3, 7, 33, low=0, high=255, dtype=torch.uint8, device="cpu")
+print(video.sum())
+print(video[0, 0, 0, :3, :3])
+print(video[0, 0, 0, -3:, -3:])
+
+num_calls = 1_000_000
+num_failing = 0
+for _ in tqdm.tqdm(range(num_calls), mininterval=5):
+    output = F.gaussian_blur_video(video, kernel_size=3)
+    if output[3, 0, 0, 2, 10] != 150:
+        num_failing += 1
+
+print(f"{num_failing:_} ({num_failing / num_calls:.1%}) calls exhibited non-determinism")
diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py
index 20b76fbf079..86bb89d8b7c 100644
--- a/torchvision/transforms/functional_tensor.py
+++ b/torchvision/transforms/functional_tensor.py
@@ -730,8 +730,7 @@ def _get_gaussian_kernel1d(kernel_size: int, sigma: float) -> Tensor:
     ksize_half = (kernel_size - 1) * 0.5
 
     x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
-    pdf = torch.exp(-0.5 * (x / sigma).pow(2))
-    kernel1d = pdf / pdf.sum()
+    kernel1d = torch.softmax(-0.5 * (x / sigma).pow(2), dim=0)
 
     return kernel1d
 
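
Note on the final hunk (`_get_gaussian_kernel1d`): the rewrite is algebraically a no-op, since softmax is exactly "exponentiate, then normalize", i.e. softmax(z)_i = exp(z_i) / sum_j exp(z_j). With z = -0.5 * (x / sigma) ** 2 that reproduces the old `pdf / pdf.sum()` up to floating-point rounding (softmax subtracts the max of z internally for numerical stability). A minimal standalone check of the equivalence — not part of the diff above, and the two helper names are made up for illustration:

```python
import torch


def kernel_exp_normalize(kernel_size: int, sigma: float) -> torch.Tensor:
    # Old formulation: evaluate the unnormalized Gaussian pdf, then normalize.
    ksize_half = (kernel_size - 1) * 0.5
    x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
    pdf = torch.exp(-0.5 * (x / sigma).pow(2))
    return pdf / pdf.sum()


def kernel_softmax(kernel_size: int, sigma: float) -> torch.Tensor:
    # New formulation: softmax of the log-density does the exp + normalize in one call.
    ksize_half = (kernel_size - 1) * 0.5
    x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
    return torch.softmax(-0.5 * (x / sigma).pow(2), dim=0)


for kernel_size, sigma in [(3, 0.8), (5, 1.0), (23, 7.5)]:
    torch.testing.assert_close(kernel_exp_normalize(kernel_size, sigma), kernel_softmax(kernel_size, sigma))
print("both kernel formulations agree")
```

So if run-to-run differences persist with this patch applied, the kernel construction itself is unlikely to be the culprit.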
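
For the `test_scripted_vs_eager` change further up: every invocation now dumps `(args, kwargs, actual, expected)` to `test/artifacts/<pytest-node-id>.pt`, so a failing CI run can be replayed locally. A rough sketch of how one of those files could be inspected offline — the script name and usage path are hypothetical, and it assumes the kernel under test returns a single tensor:

```python
# inspect_artifact.py (hypothetical helper, not part of the diff)
# Usage: python inspect_artifact.py "test/artifacts/<pytest-node-id>.pt"
import pathlib
import sys

import torch

# The artifact is a plain tuple saved with torch.save in the patched test.
args, kwargs, actual, expected = torch.load(pathlib.Path(sys.argv[1]))

print("args:", [getattr(a, "shape", a) for a in args])
print("kwargs:", kwargs)

# Cast to float so integer dtypes (e.g. uint8 images) don't wrap around on subtraction.
diff = (actual.float() - expected.float()).abs()
print("max abs diff:", diff.max().item(), "at index", (diff == diff.max()).nonzero()[0].tolist())

# Re-run a closeness check similar to the test's (default tolerances here,
# whereas the test passes info.closeness_kwargs).
torch.testing.assert_close(actual, expected)
```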