diff --git a/.circleci/config.yml b/.circleci/config.yml
index b0e4d0e7f9a..5773dae3957 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -146,7 +146,7 @@ commands:
         default: true
     steps:
       - pip_install:
-          args: --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu
+          args: --pre torch==1.13.0.dev20220618 --extra-index-url https://download.pytorch.org/whl/nightly/cpu
           descr: Install PyTorch from nightly releases
       - pip_install:
           args: --no-build-isolation <<# parameters.editable >> --editable <</ parameters.editable >> .
diff --git a/.circleci/config.yml.in b/.circleci/config.yml.in
index 514f1c563aa..432338e9d91 100644
--- a/.circleci/config.yml.in
+++ b/.circleci/config.yml.in
@@ -146,7 +146,7 @@ commands:
         default: true
     steps:
       - pip_install:
-          args: --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu
+          args: --pre torch==1.13.0.dev20220618 --extra-index-url https://download.pytorch.org/whl/nightly/cpu
           descr: Install PyTorch from nightly releases
       - pip_install:
           args: --no-build-isolation <<# parameters.editable >> --editable <</ parameters.editable >> .
diff --git a/.circleci/unittest/windows/scripts/install.sh b/.circleci/unittest/windows/scripts/install.sh
index cfdff3da6ba..dd414286588 100644
--- a/.circleci/unittest/windows/scripts/install.sh
+++ b/.circleci/unittest/windows/scripts/install.sh
@@ -34,7 +34,8 @@ else
 fi

 printf "Installing PyTorch with %s\n" "${cudatoolkit}"
-conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia "pytorch-${UPLOAD_CHANNEL}"::pytorch[build="*${version}*"] "${cudatoolkit}"
+# conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia "pytorch-${UPLOAD_CHANNEL}"::pytorch[build="*${version}*"] "${cudatoolkit}"
+pip install --pre torch==1.13.0.dev20220618+cpu --extra-index-url https://download.pytorch.org/whl/nightly/cpu

 torch_cuda=$(python -c "import torch; print(torch.cuda.is_available())")
 echo torch.cuda.is_available is $torch_cuda
diff --git a/test/test_models.py b/test/test_models.py
index 0acef4dcef6..d864cb161c0 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -603,8 +603,8 @@ def test_classification_model(model_fn, dev):
         "input_shape": (1, 3, 224, 224),
     }
     model_name = model_fn.__name__
-    if dev == "cuda" and SKIP_BIG_MODEL and model_name in skipped_big_models:
-        pytest.skip("Skipped to reduce memory usage. Set env var SKIP_BIG_MODEL=0 to enable test for this model")
+    # if dev == "cuda" and SKIP_BIG_MODEL and model_name in skipped_big_models:
+    #     pytest.skip("Skipped to reduce memory usage. Set env var SKIP_BIG_MODEL=0 to enable test for this model")
     kwargs = {**defaults, **_model_params.get(model_name, {})}
     num_classes = kwargs.get("num_classes")
     input_shape = kwargs.pop("input_shape")
@@ -613,9 +613,15 @@ def test_classification_model(model_fn, dev):
     model.eval().to(device=dev)
     # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
     x = torch.rand(input_shape).to(device=dev)
-    out = model(x)
+    with torch.inference_mode():
+        out = model(x)
     _assert_expected(out.cpu(), model_name, prec=0.1)
     assert out.shape[-1] == num_classes
+
+    if SKIP_BIG_MODEL and model_name in skipped_big_models:
+        # Skip backprop test only
+        return
+
     _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
     _check_fx_compatible(model, x, eager_out=out)