
Commit 71c459f

zifeitong authored and Robert Shaw committed

[Bugfix] Properly set distributed_executor_backend in ParallelConfig (vllm-project#4816)

1 parent e40b747 · commit 71c459f

2 files changed: 8 additions, 3 deletions

vllm/config.py

Lines changed: 1 addition & 0 deletions

@@ -575,6 +575,7 @@ class ParallelConfig:
             If None, will use synchronous tokenization.
         ray_workers_use_nsight: Whether to profile Ray workers with nsight, see
             https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler.
+        placement_group: ray distributed model workers placement group.
         distributed_executor_backend: Backend to use for distributed model
             workers, either "ray" or "mp" (multiprocessing). If either
             pipeline_parallel_size or tensor_parallel_size is greater than 1,

vllm/engine/arg_utils.py

Lines changed: 7 additions & 3 deletions

@@ -576,14 +576,18 @@ def create_engine_config(self, ) -> EngineConfig:
             model_config.get_sliding_window(),
             self.enable_prefix_caching)
         parallel_config = ParallelConfig(
-            self.pipeline_parallel_size, self.tensor_parallel_size,
-            self.worker_use_ray, self.max_parallel_loading_workers,
+            self.pipeline_parallel_size,
+            self.tensor_parallel_size,
+            self.worker_use_ray,
+            self.max_parallel_loading_workers,
             self.disable_custom_all_reduce,
             TokenizerPoolConfig.create_config(
                 self.tokenizer_pool_size,
                 self.tokenizer_pool_type,
                 self.tokenizer_pool_extra_config,
-            ), self.ray_workers_use_nsight)
+            ),
+            self.ray_workers_use_nsight,
+            distributed_executor_backend=self.distributed_executor_backend)

         speculative_config = SpeculativeConfig.maybe_create_spec_config(
             target_model_config=model_config,
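The practical effect of the arg_utils.py change is that the value of EngineArgs.distributed_executor_backend is now forwarded into ParallelConfig instead of being dropped. A minimal sketch of how one might confirm the propagation after this fix; the model name is a placeholder, the attribute paths follow the diff above, and building the configs requires the model's Hugging Face config to be reachable:

    # Sketch only: check that the backend chosen on EngineArgs reaches
    # ParallelConfig after this commit. Placeholder model; nothing is served.
    from vllm.engine.arg_utils import EngineArgs

    args = EngineArgs(
        model="facebook/opt-125m",           # placeholder model, used only to build configs
        tensor_parallel_size=2,              # >1 so a distributed backend is relevant
        distributed_executor_backend="mp",   # "ray" or "mp", per the ParallelConfig docstring
    )
    engine_config = args.create_engine_config()

    # Before this commit the keyword was never passed through to ParallelConfig;
    # with the fix it should reflect the requested backend.
    assert engine_config.parallel_config.distributed_executor_backend == "mp"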
