From d2b50a4b9ad7cad6011a7728ae1a44d0e89f6af3 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 3 Apr 2025 10:42:54 +0800 Subject: [PATCH 1/3] add seed Signed-off-by: youkaichao --- examples/offline_inference/torchrun_example.py | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/offline_inference/torchrun_example.py b/examples/offline_inference/torchrun_example.py index 35df6011550..d239dde77c6 100644 --- a/examples/offline_inference/torchrun_example.py +++ b/examples/offline_inference/torchrun_example.py @@ -27,6 +27,7 @@ model="facebook/opt-125m", tensor_parallel_size=2, distributed_executor_backend="external_launcher", + seed=0, ) outputs = llm.generate(prompts, sampling_params) From 9a0ca22be2d78af2eb30fe2d83904cb09c2a76be Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 3 Apr 2025 10:52:37 +0800 Subject: [PATCH 2/3] add comments Signed-off-by: youkaichao --- examples/offline_inference/torchrun_example.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/examples/offline_inference/torchrun_example.py b/examples/offline_inference/torchrun_example.py index d239dde77c6..7a57f29a07f 100644 --- a/examples/offline_inference/torchrun_example.py +++ b/examples/offline_inference/torchrun_example.py @@ -23,6 +23,9 @@ # Use `distributed_executor_backend="external_launcher"` so that # this llm engine/instance only creates one worker. +# It is important to set an explicit seed to make sure that +# all ranks have the same random seed, so that sampling can be +# deterministic across ranks. 
llm = LLM( model="facebook/opt-125m", tensor_parallel_size=2, From 9369ba5e24d6393edeb6bf7b7668e02bc2ad85ed Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 3 Apr 2025 11:53:10 +0800 Subject: [PATCH 3/3] add assert Signed-off-by: youkaichao --- vllm/config.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vllm/config.py b/vllm/config.py index 1255d716a2e..69cde4e362c 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -761,6 +761,12 @@ def verify_with_parallel_config( self, parallel_config: "ParallelConfig", ) -> None: + + if parallel_config.distributed_executor_backend == "external_launcher": + assert self.seed is not None, ( + "Seed must be set when using external launcher backend to " + "make sure sampling results are the same across workers.") + total_num_attention_heads = getattr(self.hf_text_config, "num_attention_heads", 0) tensor_parallel_size = parallel_config.tensor_parallel_size