1 parent 36bacf7 · commit 57b4b47
tests/unittest/llmapi/test_llm_multi_gpu.py
@@ -436,18 +436,12 @@ def __init__(
 @skip_single_gpu
 @pytest.mark.parametrize("pytorch_backend", [False, True])
 def test_llm_get_stats_tp2(pytorch_backend):
-    if pytorch_backend:
-        pytest.skip("https://nvbugs/5150466: Flaky hang")
-        return
     llm_get_stats_test_harness(tp_size=2, pytorch_backend=pytorch_backend)


 def test_llm_get_stats_async_tp2(pytorch_backend):
     llm_get_stats_async_test_harness(tp_size=2, pytorch_backend=pytorch_backend)
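This commit re-enables the PyTorch-backend case of test_llm_get_stats_tp2 by dropping the conditional skip tied to https://nvbugs/5150466. For context, below is a minimal, self-contained sketch of that conditional-skip pattern using standard pytest APIs; it is illustrative only, not the repository's code, and run_stats_check / test_get_stats_example are hypothetical placeholder names.

import pytest


@pytest.mark.parametrize("pytorch_backend", [False, True])
def test_get_stats_example(pytorch_backend):
    # Before this change, the PyTorch-backend case was skipped like this:
    #     if pytorch_backend:
    #         pytest.skip("https://nvbugs/5150466: Flaky hang")
    # Note that pytest.skip() raises an exception, so a trailing `return`
    # after it (as in the removed lines) is unreachable and redundant.
    run_stats_check(tp_size=2, pytorch_backend=pytorch_backend)


def run_stats_check(tp_size, pytorch_backend):
    # Hypothetical stand-in for llm_get_stats_test_harness().
    assert tp_size == 2
    assert pytorch_backend in (False, True)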