1 file changed: +1 −5 lines
@@ -16,8 +16,7 @@
 from vllm.lora.request import LoRARequest
 from vllm.model_executor.layers.sampler import SamplerOutput
 from vllm.sequence import ExecuteModelRequest
-from vllm.utils import (get_distributed_init_method, get_open_port,
-                        get_vllm_instance_id)
+from vllm.utils import get_distributed_init_method, get_open_port
 
 logger = init_logger(__name__)
 
@@ -73,9 +72,6 @@ def _init_executor(self) -> None:
         world_size = self.parallel_config.world_size
         tensor_parallel_size = self.parallel_config.tensor_parallel_size
 
-        # Ensure that VLLM_INSTANCE_ID is set, to be inherited by workers
-        os.environ["VLLM_INSTANCE_ID"] = get_vllm_instance_id()
-
         # Disable torch async compiling which won't work with daemonic processes
         # [tom] it doesn't seem to work setting this from the code; we need to
         # set it at the command line. Hopefully this will be fixed by upgrading torch.
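
For context, the removed lines relied on a standard property of process creation: an environment variable exported by the parent process before it starts its workers is inherited by those workers. Below is a minimal sketch of that mechanism only; it is not vLLM code, and `MY_INSTANCE_ID` and `worker` are hypothetical names used for illustration.

```python
# Sketch of env-var inheritance by spawned workers (not vLLM code).
import os
import uuid
from multiprocessing import Process


def worker() -> None:
    # The child process sees the value the parent exported before starting it.
    print("worker sees:", os.environ.get("MY_INSTANCE_ID"))


if __name__ == "__main__":
    # Analogous in spirit to the removed
    # os.environ["VLLM_INSTANCE_ID"] = get_vllm_instance_id() line.
    os.environ["MY_INSTANCE_ID"] = uuid.uuid4().hex
    p = Process(target=worker, daemon=True)  # daemonic, like the executor's workers
    p.start()
    p.join()
```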