|
version: 2.1

#examples:
#https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml

#drive tests with nox or tox or pytest?

# -------------------------------------------------------------------------------------
# environments where we run our jobs
# -------------------------------------------------------------------------------------
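# `setupcuda` installs the NVIDIA driver and the CUDA 10.2 toolkit on the machine
# executor and pins python 3.7.0 via pyenv; it is reused as a step in the main job below.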
setupcuda: &setupcuda
  run:
    name: Setup CUDA
    working_directory: ~/
    command: |
      # download and install nvidia drivers, cuda, etc
      wget --no-verbose --no-clobber -P ~/nvidia-downloads 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
      wget --no-verbose --no-clobber -P ~/nvidia-downloads http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
      sudo /bin/bash ~/nvidia-downloads/NVIDIA-Linux-x86_64-430.40.run --no-drm -q --ui=none
      sudo sh ~/nvidia-downloads/cuda_10.2.89_440.33.01_linux.run --silent
      echo "Done installing CUDA."
      pyenv versions
      nvidia-smi
      pyenv global 3.7.0
|
gpu: &gpu
  environment:
    CUDA_VERSION: "10.2"
  machine:
    image: default
  resource_class: gpu.medium # tesla m60
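# Parameters shared by the binary build jobs below; they are exported as environment
# variables, presumably for the packaging scripts to read. Leaving build_version and
# pytorch_version empty means a nightly build against a nightly PyTorch.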
binary_common: &binary_common
  parameters:
    # Edit these defaults to do a release
    build_version:
      description: "version number of release binary; by default, build a nightly"
      type: string
      default: ""
    pytorch_version:
      description: "PyTorch version to build against; by default, use a nightly"
      type: string
      default: ""
    # Don't edit these
    python_version:
      description: "Python version to build against (e.g., 3.7)"
      type: string
    cu_version:
      description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
      type: string
    wheel_docker_image:
      description: "Wheel only: what docker image to use"
      type: string
      default: "pytorch/manylinux-cuda101"
  environment:
    PYTHON_VERSION: << parameters.python_version >>
    BUILD_VERSION: << parameters.build_version >>
    PYTORCH_VERSION: << parameters.pytorch_version >>
    CU_VERSION: << parameters.cu_version >>
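# -------------------------------------------------------------------------------------
# jobs
# -------------------------------------------------------------------------------------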
jobs:
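  # main: build the CUDA extensions in-place on a GPU machine, run the unit tests,
  # and build a wheel.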
  main:
    <<: *gpu
    machine:
      image: ubuntu-1604:201903-01
    steps:
      - checkout
      - <<: *setupcuda
      - run: pip3 install --progress-bar off wheel matplotlib 'pillow<7'
      - run: pip3 install --progress-bar off torch torchvision
      # - run: conda create -p ~/conda_env python=3.7 numpy
      # - run: conda activate ~/conda_env
      # - run: conda install -c pytorch pytorch torchvision

      - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
      - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python3 setup.py build_ext --inplace
      - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python3 -m unittest discover -v -s tests
      - run: python3 setup.py bdist_wheel
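  # binary_linux_wheel: build a pip wheel inside the manylinux image and keep it as a
  # build artifact and workspace output.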
  binary_linux_wheel:
    <<: *binary_common
    docker:
      - image: << parameters.wheel_docker_image >>
    resource_class: 2xlarge+
    steps:
      - checkout
      - run: packaging/build_wheel.sh
      - store_artifacts:
          path: dist
      - persist_to_workspace:
          root: dist
          paths:
            - "*"
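  # binary_linux_conda: build a conda package in the pytorch/conda-cuda docker image
  # (no GPU attached, so the package tests are skipped).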
  binary_linux_conda:
    <<: *binary_common
    docker:
      - image: "pytorch/conda-cuda"
    resource_class: 2xlarge+
    steps:
      - checkout
      # This is building with cuda but no gpu present,
      # so we aren't running the tests.
      - run: TEST_FLAG=--no-test packaging/build_conda.sh
      - store_artifacts:
          path: /opt/conda/conda-bld/linux-64
      - persist_to_workspace:
          root: /opt/conda/conda-bld/linux-64
          paths:
            - "*"
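  # binary_linux_conda_cuda: run the conda build inside docker on a GPU machine so the
  # package tests can run against a real GPU; docker and the NVIDIA container runtime
  # are installed first.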
  binary_linux_conda_cuda:
    <<: *binary_common
    machine:
      image: ubuntu-1604:201903-01
    resource_class: gpu.medium
    steps:
      - checkout
      - run:
          name: Setup environment
          command: |
            set -e
|
            curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
            curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
|
            sudo apt-get update
|
            sudo apt-get install \
              apt-transport-https \
              ca-certificates \
              curl \
              gnupg-agent \
              software-properties-common
|
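            # Install Docker CE from Docker's apt repository (version pinned below).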
            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
            sudo add-apt-repository \
               "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
               $(lsb_release -cs) \
               stable"
|
            sudo apt-get update
            export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
            sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
|
            # Add the NVIDIA container runtime package repositories
            distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
            curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
            curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
|
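            # Install nvidia-container-toolkit so docker containers can use the GPU.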
            export NVIDIA_CONTAINER_VERSION="1.0.3-1"
            sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
            sudo systemctl restart docker
|
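            # Install the NVIDIA driver on the host; dump the installer log if it fails.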
            DRIVER_FN="NVIDIA-Linux-x86_64-410.104.run"
            wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
            sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
            nvidia-smi
|
      - run:
          name: Pull docker image
          command: |
            set -e
            export DOCKER_IMAGE=pytorch/conda-cuda
            echo Pulling docker image $DOCKER_IMAGE
            docker pull $DOCKER_IMAGE >/dev/null
|
      - run:
          name: Build and run tests
          command: |
            set -e
|
            cd ${HOME}/project/
|
            export DOCKER_IMAGE=pytorch/conda-cuda
            export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e UNICODE_ABI -e CU_VERSION"
|
            docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
|
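# -------------------------------------------------------------------------------------
# workflows
# -------------------------------------------------------------------------------------
# build_and_test runs the GPU build/test job alongside the binary conda builds.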
workflows:
  version: 2
  build_and_test:
    jobs:
      - main
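      # {{workflows()}} is not CircleCI syntax; it appears to be a placeholder that a
      # config-generation script expands into the full binary build matrix (assumption).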
      {{workflows()}}
      - binary_linux_conda:
          cu_version: cu101
          name: binary_linux_conda_py3.7_cu101
          python_version: '3.7'
      - binary_linux_conda_cuda:
          name: testrun_conda_cuda_py3.7_cu100
          python_version: "3.7"
          pytorch_version: "1.4"
          cu_version: "cu100"