
Commit e0467c6

Skip big model in test to reduce memory usage in CI (#5903)
* Skip big model in test
* Let get_models_from_module read skipped_models from a global directly instead of a function param, to reduce changes
* Also skip regnet_y_128gf
* Only skip test for test_classification_model and create a toggle using an env var
* Remove unnecessary comment
* Fix comparison of device to use str
* Add logic to test_classification_model directly
* Add kprcnn in autocast flaky list

Co-authored-by: Vasilis Vryniotis <[email protected]>
1 parent a46a323 commit e0467c6
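For context, here is a minimal sketch of the env-var toggle described in the commit message, using the names that appear in the diff below. maybe_skip_big_model is a hypothetical helper shown only for illustration; in the actual change the guard is inlined in test_classification_model.

import os

import pytest

# Skipping is the default ("1"), so CI saves memory unless explicitly overridden.
SKIP_BIG_MODEL = os.getenv("SKIP_BIG_MODEL", "1") == "1"
skipped_big_models = {"vit_h_14", "regnet_y_128gf"}


def maybe_skip_big_model(model_name, dev):
    # Only the CUDA runs of the listed classification models are skipped; CPU runs still execute.
    if dev == "cuda" and SKIP_BIG_MODEL and model_name in skipped_big_models:
        pytest.skip("Skipped to reduce memory usage. Set env var SKIP_BIG_MODEL=0 to enable test for this model")

To exercise the big models anyway, for example on a machine with enough GPU memory, run the suite with something like SKIP_BIG_MODEL=0 pytest test/test_models.py.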

File tree

1 file changed: +14 lines, -4 lines


test/test_models.py

Lines changed: 14 additions & 4 deletions
@@ -18,6 +18,7 @@
 from torchvision import models
 
 ACCEPT = os.getenv("EXPECTTEST_ACCEPT", "0") == "1"
+SKIP_BIG_MODEL = os.getenv("SKIP_BIG_MODEL", "1") == "1"
 
 
 def get_models_from_module(module):
@@ -231,6 +232,7 @@ def _check_input_backprop(model, inputs):
     "lraspp_mobilenet_v3_large",
     "maskrcnn_resnet50_fpn",
     "maskrcnn_resnet50_fpn_v2",
+    "keypointrcnn_resnet50_fpn",
 )
 
 # The tests for the following quantized models are flaky possibly due to inconsistent
@@ -329,6 +331,12 @@ def _check_input_backprop(model, inputs):
     _model_params[m] = {"input_shape": (1, 3, 64, 64)}
 
 
+# skip big models to reduce memory usage on CI test
+skipped_big_models = {
+    "vit_h_14",
+    "regnet_y_128gf",
+}
+
 # The following contains configuration and expected values to be used tests that are model specific
 _model_tests_values = {
     "retinanet_resnet50_fpn": {
@@ -592,6 +600,8 @@ def test_classification_model(model_fn, dev):
         "input_shape": (1, 3, 224, 224),
     }
     model_name = model_fn.__name__
+    if dev == "cuda" and SKIP_BIG_MODEL and model_name in skipped_big_models:
+        pytest.skip("Skipped to reduce memory usage. Set env var SKIP_BIG_MODEL=0 to enable test for this model")
     kwargs = {**defaults, **_model_params.get(model_name, {})}
     num_classes = kwargs.get("num_classes")
     input_shape = kwargs.pop("input_shape")
@@ -606,7 +616,7 @@ def test_classification_model(model_fn, dev):
     _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
     _check_fx_compatible(model, x, eager_out=out)
 
-    if dev == torch.device("cuda"):
+    if dev == "cuda":
         with torch.cuda.amp.autocast():
             out = model(x)
             # See autocast_flaky_numerics comment at top of file.
@@ -659,7 +669,7 @@ def check_out(out):
     _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
     _check_fx_compatible(model, x, eager_out=out)
 
-    if dev == torch.device("cuda"):
+    if dev == "cuda":
         with torch.cuda.amp.autocast():
             out = model(x)
             # See autocast_flaky_numerics comment at top of file.
@@ -757,7 +767,7 @@ def compute_mean_std(tensor):
     full_validation = check_out(out)
     _check_jit_scriptable(model, ([x],), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
 
-    if dev == torch.device("cuda"):
+    if dev == "cuda":
         with torch.cuda.amp.autocast():
             out = model(model_input)
             # See autocast_flaky_numerics comment at top of file.
@@ -823,7 +833,7 @@ def test_video_model(model_fn, dev):
     _check_fx_compatible(model, x, eager_out=out)
     assert out.shape[-1] == 50
 
-    if dev == torch.device("cuda"):
+    if dev == "cuda":
         with torch.cuda.amp.autocast():
             out = model(x)
             assert out.shape[-1] == 50
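The dev == "cuda" changes above matter because the tests parametrize dev as a plain string ("cpu" or "cuda"), and on the PyTorch versions used here that string does not compare equal to a torch.device object, so the autocast branches were silently never entered on GPU runs. A small hypothetical snippet, not taken from the test suite, illustrating the difference:

import torch

dev = "cuda"  # the tests pass the device around as a plain string

# On the PyTorch versions targeted here, a str does not compare equal to a torch.device,
# so this guard never fired:
print(dev == torch.device("cuda"))  # False

# Comparing string to string behaves as intended, so the autocast block now runs:
print(dev == "cuda")  # True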
