Skip to content

Commit 2ed744d

Browse files
cleanup
Summary: Co-authored-by: Ye (Charlotte) Qi <[email protected]> Co-authored-by: Kai Wu <[email protected]> Signed-off-by: Ye (Charlotte) Qi <[email protected]>
1 parent d94ad04 commit 2ed744d

File tree

3 files changed: +1 addition, −10 deletions

vllm/entrypoints/chat_utils.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1170,8 +1170,6 @@ def apply_hf_chat_template(
             "allowed, so you must provide a chat template if the tokenizer "
             "does not define one.")

-    logger.warning(f"{tools=}")
-    logger.warning(f"{conversation=}")
     return tokenizer.apply_chat_template(
         conversation=conversation,  # type: ignore[arg-type]
         tools=tools,  # type: ignore[arg-type]

vllm/entrypoints/openai/serving_chat.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -135,8 +135,6 @@ async def create_chat_completion(
             logger.error("Error with model %s", error_check_ret)
             return error_check_ret

-        logger.warning(f"{request=}")
-
         # If the engine is dead, raise the engine's DEAD_ERROR.
         # This is required for the streaming case, where we return a
         # success status before we actually start generating text :).
@@ -175,8 +173,6 @@ async def create_chat_completion(
             tool_dicts = None if request.tools is None else [
                 tool.model_dump() for tool in request.tools
             ]
-            logger.warning(f"{tool_dicts=}")
-            logger.warning(f"{request.tools=}")

            (
                conversation,
@@ -197,9 +193,6 @@ async def create_chat_completion(
                 truncate_prompt_tokens=request.truncate_prompt_tokens,
                 add_special_tokens=request.add_special_tokens,
             )
-            logger.warning(f"{conversation=}")
-            logger.warning(f"{request_prompts=}")
-            logger.warning(f"{engine_prompts=}")
         except (ValueError, TypeError, RuntimeError,
                 jinja2.TemplateError) as e:
             logger.exception("Error in preprocessing prompt inputs")

vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ def extract_tool_calls(
         """
         Extract the tool calls from a complete model response.
         """
-        print(f"{model_output=}")
+
         if not (self.TOOL_CALL_REGEX.match(model_output)):
             return ExtractedToolCallInformation(tools_called=False,
                                                 tool_calls=[],

0 commit comments

Comments
 (0)