[V1][Spec Decode] Make Eagle model arch config driven #17323

Merged: 4 commits, Apr 29, 2025
vllm/config.py (2 additions, 1 deletion)

```diff
@@ -2401,7 +2401,8 @@ def __post_init__(self):
                 pass
             else:
                 eagle_config = EAGLEConfig(
-                    self.draft_model_config.hf_config)
+                    self.draft_model_config.hf_config,
+                    method=self.method)
                 self.draft_model_config.hf_config = eagle_config

         if (self.num_speculative_tokens is not None
```
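With this change, `SpeculativeConfig.__post_init__` forwards the configured speculative method into `EAGLEConfig` instead of letting the draft architecture default to a hard-coded name. A minimal usage sketch of where that `method` value comes from, assuming the dict-style `speculative_config` API; the model names and exact keys are illustrative, not part of this PR:

```python
# Sketch: the "method" key below is what __post_init__ forwards via
# EAGLEConfig(..., method=self.method). Model names are examples only.
from vllm import LLM

llm = LLM(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    speculative_config={
        "method": "eagle",  # or "eagle3"; selects the architecture prefix
        "model": "yuhuili/EAGLE-LLaMA3-Instruct-8B",
        "num_speculative_tokens": 3,
    },
)
```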
vllm/transformers_utils/configs/eagle.py (18 additions, 1 deletion)

```diff
@@ -15,6 +15,7 @@ class EAGLEConfig(PretrainedConfig):
     def __init__(self,
                  model: Union[PretrainedConfig, dict, None] = None,
                  truncated_vocab_size: Optional[int] = None,
+                 method: Optional[str] = 'eagle',
                  **kwargs):

         model_config: Union[PretrainedConfig, DeepseekV2Config, None]
@@ -45,7 +46,23 @@ def __init__(self,
         if not envs.VLLM_USE_V1:
             kwargs["architectures"] = ["EAGLEModel"]
         else:
-            kwargs["architectures"] = ["EagleLlamaForCausalLM"]
+            # Eagle model name should follow naming convention of
+            # LlamaForCausalLM -> EagleLlamaForCausalLM
+            if method == "eagle":
+                assert self.model is not None, \
+                    "model should not be None when method is eagle"
+                kwargs["architectures"] = [
+                    f"Eagle{arch}" for arch in self.model.architectures
+                ]
+            elif method == "eagle3":
+                assert self.model is not None, \
+                    "model should not be None when method is eagle3"
+                kwargs["architectures"] = [
+                    f"Eagle3{arch}" for arch in self.model.architectures
+                ]
+            else:
+                raise ValueError(f"Invalid method {method}. \
+                    Supported methods are eagle and eagle3.")

         super().__init__(**kwargs)
```
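The key change: rather than pinning V1 Eagle to `EagleLlamaForCausalLM`, the config now derives the draft architecture from the target model's architecture list by prefixing it with `Eagle` or `Eagle3`. A standalone sketch of that convention; the helper function is hypothetical and exists only to illustrate the mapping, since the real logic lives inline in `EAGLEConfig.__init__`:

```python
# Illustration of the naming convention enforced above (hypothetical helper).
def derive_draft_architectures(target_archs: list[str], method: str) -> list[str]:
    prefixes = {"eagle": "Eagle", "eagle3": "Eagle3"}
    if method not in prefixes:
        raise ValueError(f"Invalid method {method}. "
                         "Supported methods are eagle and eagle3.")
    # LlamaForCausalLM -> EagleLlamaForCausalLM / Eagle3LlamaForCausalLM
    return [f"{prefixes[method]}{arch}" for arch in target_archs]

assert derive_draft_architectures(["LlamaForCausalLM"], "eagle") == \
       ["EagleLlamaForCausalLM"]
assert derive_draft_architectures(["LlamaForCausalLM"], "eagle3") == \
       ["Eagle3LlamaForCausalLM"]
```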
vllm/v1/spec_decode/eagle.py (6 additions, 11 deletions)

```diff
@@ -9,8 +9,7 @@
 from vllm.logger import init_logger
 from vllm.model_executor.model_loader.loader import get_model_loader
 from vllm.model_executor.model_loader.utils import set_default_torch_dtype
-from vllm.model_executor.models.llama_eagle import EagleLlamaForCausalLM
-from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM
+from vllm.model_executor.models import ModelRegistry
 from vllm.v1.attention.backends.flash_attn import FlashAttentionMetadata
 from vllm.v1.sample.metadata import SamplingMetadata

@@ -225,15 +224,11 @@ def load_model(self, target_model: nn.Module) -> None:
         with set_default_torch_dtype(
                 draft_model_config.dtype), set_current_vllm_config(
                     self.vllm_config):
-            if self.vllm_config.speculative_config.method == "eagle":
-                self.model = EagleLlamaForCausalLM(
-                    model_config=draft_model_config,
-                    start_layer_id=target_layer_num).to(target_device)
-            else:
-                assert self.vllm_config.speculative_config.method == "eagle3"
-                self.model = Eagle3LlamaForCausalLM(
-                    model_config=draft_model_config,
-                    start_layer_id=target_layer_num).to(target_device)
+            draft_model_cls, arch = ModelRegistry.resolve_model_cls(
+                draft_model_config.architectures)
+            self.model = draft_model_cls(
+                model_config=draft_model_config,
+                start_layer_id=target_layer_num).to(target_device)

         loaded_weights = self.model.load_weights(
             loader.get_all_weights(
```
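With the registry lookup in place, `load_model` no longer needs per-method branches: whatever class is registered under the derived architecture name gets instantiated. In principle this means Eagle support for a new target family only requires registering a draft class under the conventional name. A hedged sketch of out-of-tree registration; the Qwen2 draft class and plugin module path are assumptions, since this PR only ships the Llama variants:

```python
# Hypothetical: register an Eagle draft head for a Qwen2 target so that
# resolve_model_cls(["EagleQwen2ForCausalLM"]) can find it. The plugin
# module path and class name are assumptions, not part of this PR.
from vllm import ModelRegistry

ModelRegistry.register_model(
    "EagleQwen2ForCausalLM",                    # derived from Qwen2ForCausalLM
    "my_plugin.models:EagleQwen2ForCausalLM",   # lazy "module:ClassName" ref
)
```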