
[Doc] Update references to doc files #18637


Merged
merged 1 commit on May 23, 2025
6 changes: 2 additions & 4 deletions .github/mergify.yml
@@ -58,7 +58,7 @@ pull_request_rules:
- files~=^benchmarks/structured_schemas/
- files=benchmarks/benchmark_serving_structured_output.py
- files=benchmarks/run_structured_output_benchmark.sh
- files=docs/source/features/structured_outputs.md
- files=docs/features/structured_outputs.md
- files=examples/offline_inference/structured_outputs.py
- files=examples/online_serving/openai_chat_completion_structured_outputs.py
- files=examples/online_serving/openai_chat_completion_structured_outputs_with_reasoning.py
@@ -135,9 +135,7 @@ pull_request_rules:
- files~=^tests/entrypoints/openai/tool_parsers/
- files=tests/entrypoints/openai/test_chat_with_tool_reasoning.py
- files~=^vllm/entrypoints/openai/tool_parsers/
- files=docs/source/features/tool_calling.md
- files=docs/source/getting_started/examples/openai_chat_completion_client_with_tools.md
- files=docs/source/getting_started/examples/chat_with_tools.md
- files=docs/features/tool_calling.md
- files~=^examples/tool_chat_*
- files=examples/offline_inference/chat_with_tools.py
- files=examples/online_serving/openai_chat_completion_client_with_tools_required.py
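A note on the mergify.yml hunk above: `files=` conditions match a changed path exactly, while `files~=` conditions are regular expressions. The sketch below (plain Python `re` with a hypothetical rule subset and file list, not how Mergify itself evaluates rules) shows why the rename from `docs/source/features/structured_outputs.md` to `docs/features/structured_outputs.md` has to be mirrored in the rule:

```python
import re

# Hypothetical subset of the structured-output label rule above.
exact_paths = {
    "docs/features/structured_outputs.md",
    "benchmarks/benchmark_serving_structured_output.py",
}
regex_conditions = [r"^benchmarks/structured_schemas/"]

def rule_matches(changed_file: str) -> bool:
    """Return True if a changed file satisfies any condition of the rule."""
    if changed_file in exact_paths:           # files=  : exact path match
        return True
    return any(re.search(p, changed_file)     # files~= : regex match
               for p in regex_conditions)

print(rule_matches("docs/features/structured_outputs.md"))         # True (new path)
print(rule_matches("docs/source/features/structured_outputs.md"))  # False (old path)
print(rule_matches("benchmarks/structured_schemas/schema.json"))   # True (regex)
```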
4 changes: 2 additions & 2 deletions docker/Dockerfile
@@ -2,8 +2,8 @@
# to run the OpenAI compatible server.

# Please update any changes made here to
# docs/source/contributing/dockerfile/dockerfile.md and
# docs/source/assets/contributing/dockerfile-stages-dependency.png
# docs/contributing/dockerfile/dockerfile.md and
# docs/assets/contributing/dockerfile-stages-dependency.png

ARG CUDA_VERSION=12.8.1
#################### BASE BUILD IMAGE ####################
5 changes: 2 additions & 3 deletions docs/contributing/overview.md
@@ -130,9 +130,8 @@ The PR needs to meet the following code quality standards:
understand the code.
- Include sufficient tests to ensure the project stays correct and robust. This
includes both unit tests and integration tests.
- Please add documentation to `docs/source/` if the PR modifies the
user-facing behaviors of vLLM. It helps vLLM users understand and utilize the
new features or changes.
- Please add documentation to `docs/` if the PR modifies the user-facing behaviors of vLLM.
It helps vLLM users understand and utilize the new features or changes.

### Adding or Changing Kernels

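The contributing-guide bullet rewritten above is exactly the kind of reference this PR chases down. A minimal sketch of scanning a checkout for leftover references to the old `docs/source/` prefix (a hypothetical helper, not part of this PR or of vLLM's tooling):

```python
from pathlib import Path

def find_stale_doc_refs(repo_root: str,
                        stale_prefix: str = "docs/source/") -> list[tuple[str, int, str]]:
    """Return (file, line number, line text) for each remaining old-prefix reference."""
    hits = []
    for path in Path(repo_root).rglob("*"):
        if path.is_dir() or ".git" in path.parts:
            continue
        try:
            text = path.read_text(encoding="utf-8")
        except (UnicodeDecodeError, OSError):
            continue  # skip binary or unreadable files
        for lineno, line in enumerate(text.splitlines(), start=1):
            if stale_prefix in line:
                hits.append((str(path), lineno, line.strip()))
    return hits

if __name__ == "__main__":
    for file, lineno, line in find_stale_doc_refs("."):
        print(f"{file}:{lineno}: {line}")
```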
Binary file not shown.
2 changes: 1 addition & 1 deletion tools/update-dockerfile-graph.sh
@@ -24,7 +24,7 @@ if printf '%s\n' "${FILES[@]}" | grep -q "^docker/Dockerfile$"; then
fi

# Define the target file path
TARGET_GRAPH_FILE="docs/source/assets/contributing/dockerfile-stages-dependency.png"
TARGET_GRAPH_FILE="docs/assets/contributing/dockerfile-stages-dependency.png"

# Ensure target directory exists
mkdir -p "$(dirname "$TARGET_GRAPH_FILE")"
4 changes: 2 additions & 2 deletions vllm/config.py
@@ -988,7 +988,7 @@ def verify_async_output_proc(self, parallel_config, speculative_config,
self.use_async_output_proc = False
return

# Reminder: Please update docs/source/features/compatibility_matrix.md
# Reminder: Please update docs/features/compatibility_matrix.md
# If the feature combo become valid
from vllm.platforms import current_platform
if not current_platform.is_async_output_supported(self.enforce_eager):
@@ -1004,7 +1004,7 @@ def verify_async_output_proc(self, parallel_config, speculative_config,
if self.runner_type == "pooling":
self.use_async_output_proc = False

# Reminder: Please update docs/source/features/compatibility_matrix.md
# Reminder: Please update docs/features/compatibility_matrix.md
# If the feature combo become valid
if speculative_config:
self.use_async_output_proc = False
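Every remaining hunk in this PR is the same one-line path update to this reminder comment. For readers unfamiliar with what the comment annotates, the surrounding code is feature gating on unsupported combinations, roughly like the simplified, hypothetical sketch below (not vLLM's actual logic):

```python
def resolve_async_output_proc(requested: bool,
                              platform_supports_async: bool,
                              has_speculative_config: bool) -> bool:
    # When an unsupported feature combination is detected, the optional feature
    # is quietly turned off; docs/features/compatibility_matrix.md is where the
    # supported combinations are documented.
    if not platform_supports_async:
        return False
    if has_speculative_config:
        return False
    return requested
```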
2 changes: 1 addition & 1 deletion vllm/engine/arg_utils.py
@@ -1084,7 +1084,7 @@ def create_engine_config(
disable_log_stats=self.disable_log_stats,
)

# Reminder: Please update docs/source/features/compatibility_matrix.md
# Reminder: Please update docs/features/compatibility_matrix.md
# If the feature combo become valid
if self.num_scheduler_steps > 1:
if speculative_config is not None:
2 changes: 1 addition & 1 deletion vllm/engine/output_processor/multi_step.py
@@ -67,7 +67,7 @@ def process_prompt_logprob(self, seq_group: SequenceGroup,
@staticmethod
@functools.lru_cache
def _log_prompt_logprob_unsupported_warning_once():
# Reminder: Please update docs/source/features/compatibility_matrix.md
# Reminder: Please update docs/features/compatibility_matrix.md
# If the feature combo become valid
logger.warning(
"Prompt logprob is not supported by multi step workers. "
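The multi_step.py hunk sits inside a warn-once helper. As a generic illustration of that idiom (hypothetical names, not vLLM's exact code), `functools.lru_cache` makes a logging call fire only once per distinct message:

```python
import functools
import logging

logger = logging.getLogger(__name__)

@functools.lru_cache
def warn_once(message: str) -> None:
    # lru_cache memoizes the (message,) arguments, so repeated calls with the
    # same message hit the cache and the warning is emitted only once.
    logger.warning(message)

warn_once("Prompt logprob is not supported by multi step workers.")
warn_once("Prompt logprob is not supported by multi step workers.")  # cached, no second log
```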
2 changes: 1 addition & 1 deletion vllm/platforms/cpu.py
@@ -75,7 +75,7 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
import vllm.envs as envs
from vllm.utils import GiB_bytes
model_config = vllm_config.model_config
# Reminder: Please update docs/source/features/compatibility_matrix.md
# Reminder: Please update docs/features/compatibility_matrix.md
# If the feature combo become valid
if not model_config.enforce_eager:
model_config.enforce_eager = True
2 changes: 1 addition & 1 deletion vllm/spec_decode/spec_decode_worker.py
@@ -114,7 +114,7 @@ def create_spec_worker(*args, **kwargs) -> "SpecDecodeWorker":
return spec_decode_worker


# Reminder: Please update docs/source/features/compatibility_matrix.md
# Reminder: Please update docs/features/compatibility_matrix.md
# If the feature combo become valid
class SpecDecodeWorker(LoRANotSupportedWorkerBase):
"""Worker which implements speculative decoding.
2 changes: 1 addition & 1 deletion vllm/utils.py
@@ -85,7 +85,7 @@

# Exception strings for non-implemented encoder/decoder scenarios

# Reminder: Please update docs/source/features/compatibility_matrix.md
# Reminder: Please update docs/features/compatibility_matrix.md
# If the feature combo become valid

STR_NOT_IMPL_ENC_DEC_SWA = \
2 changes: 1 addition & 1 deletion vllm/worker/multi_step_model_runner.py
@@ -824,7 +824,7 @@ def _pythonize_sampler_output(

for sgdx, (seq_group,
sample_result) in enumerate(zip(seq_groups, samples_list)):
# Reminder: Please update docs/source/features/compatibility_matrix.md
# Reminder: Please update docs/features/compatibility_matrix.md
# If the feature combo become valid
# (Check for Guided Decoding)
if seq_group.sampling_params.logits_processors:
2 changes: 1 addition & 1 deletion vllm/worker/utils.py
@@ -14,7 +14,7 @@ def assert_enc_dec_mr_supported_scenario(
a supported scenario.
'''

# Reminder: Please update docs/source/features/compatibility_matrix.md
# Reminder: Please update docs/features/compatibility_matrix.md
# If the feature combo become valid

if enc_dec_mr.cache_config.enable_prefix_caching: