Commit eb69d68

Authored Apr 2, 2024
[Misc] [CI/Build] Speed up block manager CPU-only unit tests ~10x by opting-out of GPU cleanup (#3783)
1 parent: 7d4e1b8 · commit: eb69d68

File tree

3 files changed: +25 −18 lines changed


tests/conftest.py

+12 −2

@@ -55,10 +55,20 @@ def cleanup():
     torch.cuda.empty_cache()
 
 
+@pytest.fixture()
+def should_do_global_cleanup_after_test() -> bool:
+    """Allow subdirectories to skip global cleanup by overriding this fixture.
+    This can provide a ~10x speedup for non-GPU unit tests since they don't need
+    to initialize torch.
+    """
+    return True
+
+
 @pytest.fixture(autouse=True)
-def cleanup_fixture():
+def cleanup_fixture(should_do_global_cleanup_after_test: bool):
     yield
-    cleanup()
+    if should_do_global_cleanup_after_test:
+        cleanup()
 
 
 @pytest.fixture(scope="session")
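The mechanism here is pytest's conftest.py resolution: when a test requests a fixture by name, pytest uses the definition from the nearest conftest.py, so a subdirectory can shadow a fixture declared at the suite root. A minimal two-file sketch of the pattern (the directory name and the print() stand-in for the expensive GPU teardown are illustrative, not from this commit):

# tests/conftest.py -- suite-wide defaults
import pytest

@pytest.fixture()
def should_do_global_cleanup_after_test() -> bool:
    return True  # by default, every test pays for the teardown

@pytest.fixture(autouse=True)
def cleanup_fixture(should_do_global_cleanup_after_test: bool):
    yield  # the test body runs here
    if should_do_global_cleanup_after_test:
        print("expensive torch/GPU teardown would run here")

# tests/cpu_only/conftest.py -- shadows the flag for this subtree only
import pytest

@pytest.fixture()
def should_do_global_cleanup_after_test() -> bool:
    return False  # tests under tests/cpu_only/ skip the teardown

Note that the autouse fixture itself still runs for every test; only the expensive branch is skipped, which is what makes the opt-out cheap.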

tests/core/block/conftest.py

+12 −0 (new file)

@@ -0,0 +1,12 @@
+import pytest
+
+
+@pytest.fixture()
+def should_do_global_cleanup_after_test() -> bool:
+    """Disable the global cleanup fixture for tests in this directory. This
+    provides a ~10x speedup for unit tests that don't load a model to GPU.
+
+    This requires that tests in this directory clean up after themselves if they
+    use the GPU.
+    """
+    return False
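Per the docstring above, a test under tests/core/block/ that does touch the GPU now has to release resources itself. A hypothetical example of what that could look like (the test name and body are illustrative, not part of this commit), reusing the shared helper from tests/conftest.py:

import torch

from tests.conftest import cleanup


def test_block_table_on_gpu():  # hypothetical test, not in this commit
    block_table = torch.zeros(16, 128, device="cuda")
    try:
        assert block_table.shape == (16, 128)
    finally:
        del block_table
        cleanup()  # manual teardown, since the autouse cleanup is skipped here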

tests/core/block/e2e/conftest.py

+1 −16

@@ -1,25 +1,10 @@
-import contextlib
-import gc
-
 import pytest
-import ray
-import torch
 
+from tests.conftest import cleanup
 from vllm import LLM
-from vllm.model_executor.parallel_utils.parallel_state import (
-    destroy_model_parallel)
 from vllm.model_executor.utils import set_random_seed
 
 
-def cleanup():
-    destroy_model_parallel()
-    with contextlib.suppress(AssertionError):
-        torch.distributed.destroy_process_group()
-    gc.collect()
-    torch.cuda.empty_cache()
-    ray.shutdown()
-
-
 @pytest.fixture
 def baseline_llm_generator(common_llm_kwargs, per_test_common_llm_kwargs,
                            baseline_llm_kwargs, seed):
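Because the new tests/core/block/conftest.py disables the autouse cleanup for this whole subtree, including e2e/, these generator fixtures must invoke cleanup() explicitly between engine instantiations; the diff simply swaps a local duplicate of the helper for the shared one in tests/conftest.py. A rough sketch of how such a fixture might drive it (only the signature comes from the diff; the body is illustrative):

import pytest

from tests.conftest import cleanup
from vllm import LLM
from vllm.model_executor.utils import set_random_seed


@pytest.fixture
def baseline_llm_generator(common_llm_kwargs, per_test_common_llm_kwargs,
                           baseline_llm_kwargs, seed):
    kwargs = {**common_llm_kwargs, **per_test_common_llm_kwargs,
              **baseline_llm_kwargs}

    def generator_inner():
        llm = LLM(**kwargs)  # build the engine under test
        set_random_seed(seed)
        yield llm
        del llm
        cleanup()  # free the GPU before the next engine is created

    return generator_inner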

Comments (0)