Skip big model in test to reduce memory usage in CI #5903

Merged 12 commits on Apr 28, 2022

Changes from 11 commits
test/test_models.py: 17 changes (13 additions, 4 deletions)

@@ -18,6 +18,7 @@
 from torchvision import models

 ACCEPT = os.getenv("EXPECTTEST_ACCEPT", "0") == "1"
+SKIP_BIG_MODEL = os.getenv("SKIP_BIG_MODEL", "1") == "1"


 def get_models_from_module(module):
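For context on the new flag: os.getenv falls back to its second argument when the variable is unset, so SKIP_BIG_MODEL defaults to "1" and big-model skipping is on unless it is explicitly disabled with SKIP_BIG_MODEL=0. A minimal sketch of that default-on pattern (the helper and asserts below are illustrative, not part of the PR):

import os

def skip_big_models(environ=None) -> bool:
    # Mirrors the SKIP_BIG_MODEL line above: unset or "1" means skip,
    # and only an explicit SKIP_BIG_MODEL=0 opts back in to big models.
    env = os.environ if environ is None else environ
    return env.get("SKIP_BIG_MODEL", "1") == "1"

assert skip_big_models({}) is True                        # unset: skip by default
assert skip_big_models({"SKIP_BIG_MODEL": "1"}) is True
assert skip_big_models({"SKIP_BIG_MODEL": "0"}) is False  # run the big models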
@@ -329,6 +330,12 @@ def _check_input_backprop(model, inputs):
     _model_params[m] = {"input_shape": (1, 3, 64, 64)}


+# skip big models to reduce memory usage in CI tests
+skipped_big_models = {
+    "vit_h_14",
+    "regnet_y_128gf",
+}
+
 # The following contains configuration and expected values to be used in tests that are model specific
 _model_tests_values = {
     "retinanet_resnet50_fpn": {
@@ -592,6 +599,8 @@ def test_classification_model(model_fn, dev):
"input_shape": (1, 3, 224, 224),
}
model_name = model_fn.__name__
if dev == "cuda" and SKIP_BIG_MODEL and model_name in skipped_big_models:
pytest.skip("Skipped to reduce memory usage. Set env var SKIP_BIG_MODEL=0 to enable test for this model")
kwargs = {**defaults, **_model_params.get(model_name, {})}
num_classes = kwargs.get("num_classes")
input_shape = kwargs.pop("input_shape")
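The guard is a runtime pytest.skip call rather than a skipif marker, so it can combine the parametrized dev value with the environment flag; CPU runs of the big models are kept and only their memory-hungry CUDA runs are dropped. A self-contained toy version of the same gate (names reused from the diff, test body hypothetical):

import os
import pytest

SKIP_BIG_MODEL = os.getenv("SKIP_BIG_MODEL", "1") == "1"
skipped_big_models = {"vit_h_14", "regnet_y_128gf"}

@pytest.mark.parametrize("dev", ["cpu", "cuda"])
@pytest.mark.parametrize("model_name", ["resnet18", "vit_h_14"])
def test_model(model_name, dev):
    # Skip only the big-model/CUDA combinations; everything else still runs.
    if dev == "cuda" and SKIP_BIG_MODEL and model_name in skipped_big_models:
        pytest.skip("Skipped to reduce memory usage. Set env var SKIP_BIG_MODEL=0 to enable test for this model")
    assert model_name  # stand-in for the real model construction and checks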
@@ -606,7 +615,7 @@
     _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
     _check_fx_compatible(model, x, eager_out=out)

-    if dev == torch.device("cuda"):
+    if dev == "cuda":
         with torch.cuda.amp.autocast():
             out = model(x)
             # See autocast_flaky_numerics comment at top of file.
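This comparison fix matters beyond style: dev arrives from pytest parametrization as a plain string, and on the PyTorch releases current at the time a string never compared equal to a torch.device, so the old condition was always false and the autocast checks silently never ran on CUDA. A small demonstration (illustrative snippet, not from the PR):

import torch

dev = "cuda"  # what the test parametrization actually passes

print(dev == torch.device("cuda"))  # False: str vs torch.device does not match
print(dev == "cuda")                # True: the fixed comparison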
@@ -659,7 +668,7 @@ def check_out(out):
     _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
     _check_fx_compatible(model, x, eager_out=out)

-    if dev == torch.device("cuda"):
+    if dev == "cuda":
         with torch.cuda.amp.autocast():
             out = model(x)
             # See autocast_flaky_numerics comment at top of file.
@@ -757,7 +766,7 @@ def compute_mean_std(tensor):
     full_validation = check_out(out)
     _check_jit_scriptable(model, ([x],), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)

-    if dev == torch.device("cuda"):
+    if dev == "cuda":
         with torch.cuda.amp.autocast():
             out = model(model_input)
             # See autocast_flaky_numerics comment at top of file.
@@ -823,7 +832,7 @@ def test_video_model(model_fn, dev):
     _check_fx_compatible(model, x, eager_out=out)
     assert out.shape[-1] == 50

-    if dev == torch.device("cuda"):
+    if dev == "cuda":
         with torch.cuda.amp.autocast():
             out = model(x)
             assert out.shape[-1] == 50