Skip to content

Commit ef7e1ec

Browse files
committed
chore: Keep compatibility
Signed-off-by: Ce Gao <[email protected]>
1 parent 2431371 commit ef7e1ec

File tree

3 files changed: +105 −43 lines changed

Diff for: examples/online_serving/openai_chat_completion_with_reasoning.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,8 @@
3636
reasoning_content = response.choices[0].message.reasoning_content
3737
content = response.choices[0].message.content
3838

39-
print("reasoning_content:", reasoning_content)
40-
print("content:", content)
39+
print("reasoning_content for Round 1:", reasoning_content)
40+
print("content for Round 1:", content)
4141

4242
# Round 2
4343
messages.append({"role": "assistant", "content": content})
@@ -50,5 +50,5 @@
5050
reasoning_content = response.choices[0].message.reasoning_content
5151
content = response.choices[0].message.content
5252

53-
print("reasoning_content:", reasoning_content)
54-
print("content:", content)
53+
print("reasoning_content for Round 2:", reasoning_content)
54+
print("content for Round 2:", content)

Diff for: tests/entrypoints/openai/reasoning_parsers/test_deepseekr1_reasoning_parser.py

+66-16
Original file line numberDiff line numberDiff line change
@@ -15,71 +15,91 @@
1515
end_token = "</think>"
1616

1717
SIMPLE_REASONING = {
18-
"output": "<think>This is a reasoning section</think>This is the rest",
18+
"output": "This is a reasoning section</think>This is the rest",
1919
"reasoning_content": "This is a reasoning section",
2020
"content": "This is the rest",
2121
}
2222
COMPLETE_REASONING = {
23-
"output": "<think>This is a reasoning section</think>",
23+
"output": "This is a reasoning section</think>",
2424
"reasoning_content": "This is a reasoning section",
2525
"content": None,
2626
}
2727
NO_REASONING = {
28-
"output": "This is a reasoning section",
28+
"output": "This is content",
2929
"reasoning_content": None,
30-
"content": "This is a reasoning section",
30+
"content": "This is content",
31+
}
32+
NO_REASONING_STREAMING = {
33+
"output": "This is a reasoning section",
34+
"reasoning_content": "This is a reasoning section",
35+
"content": None,
3136
}
3237
MULTIPLE_LINES = {
33-
"output": "<think>This\nThat</think>This is the rest\nThat",
38+
"output": "This\nThat</think>This is the rest\nThat",
3439
"reasoning_content": "This\nThat",
3540
"content": "This is the rest\nThat",
3641
}
3742
SHORTEST_REASONING_NO_STREAMING = {
38-
"output": "<think></think>This is the rest",
43+
"output": "</think>This is the rest",
3944
"reasoning_content": "",
4045
"content": "This is the rest",
4146
}
4247
SHORTEST_REASONING = {
43-
"output": "<think></think>This is the rest",
48+
"output": "</think>This is the rest",
4449
"reasoning_content": None,
4550
"content": "This is the rest",
4651
}
52+
REASONING_WITH_THINK = {
53+
"output": "<think>This is a reasoning section</think>This is the rest",
54+
"reasoning_content": "This is a reasoning section",
55+
"content": "This is the rest",
56+
}
57+
COMPLETE_REASONING_WITH_THINK = {
58+
"output": "<think>This is a reasoning section</think>",
59+
"reasoning_content": "This is a reasoning section",
60+
"content": None,
61+
}
62+
MULTIPLE_LINES_WITH_THINK = {
63+
"output": "<think>This\nThat</think>This is the rest\nThat",
64+
"reasoning_content": "This\nThat",
65+
"content": "This is the rest\nThat",
66+
}
4767

4868
TEST_CASES = [
4969
pytest.param(
5070
False,
5171
SIMPLE_REASONING,
52-
id="simple_streaming",
72+
id="simple_reasoning",
5373
),
5474
pytest.param(
5575
True,
5676
SIMPLE_REASONING,
57-
id="simple_streaming",
77+
id="simple_reasoning_streaming",
5878
),
5979
pytest.param(
6080
False,
6181
COMPLETE_REASONING,
62-
id="complete_streaming",
82+
id="complete_reasoning",
6383
),
6484
pytest.param(
6585
True,
6686
COMPLETE_REASONING,
67-
id="complete_streaming",
87+
id="complete_reasoning_streaming",
6888
),
6989
pytest.param(
7090
False,
7191
NO_REASONING,
72-
id="no_streaming",
92+
id="no_reasoning_token",
7393
),
7494
pytest.param(
7595
True,
76-
NO_REASONING,
77-
id="no_streaming",
96+
NO_REASONING_STREAMING,
97+
id="no_reasoning_token_streaming",
7898
),
7999
pytest.param(
80100
False,
81101
MULTIPLE_LINES,
82-
id="multiple_lines_streaming",
102+
id="multiple_lines",
83103
),
84104
pytest.param(
85105
True,
@@ -89,13 +109,43 @@
89109
pytest.param(
90110
True,
91111
SHORTEST_REASONING,
92-
id="shortest_streaming",
112+
id="shortest",
93113
),
94114
pytest.param(
95115
False,
96116
SHORTEST_REASONING_NO_STREAMING,
97117
id="shortest_streaming",
98118
),
119+
pytest.param(
120+
False,
121+
REASONING_WITH_THINK,
122+
id="reasoning_with_think",
123+
),
124+
pytest.param(
125+
True,
126+
REASONING_WITH_THINK,
127+
id="reasoning_with_think_streaming",
128+
),
129+
pytest.param(
130+
False,
131+
COMPLETE_REASONING_WITH_THINK,
132+
id="complete_reasoning_with_think",
133+
),
134+
pytest.param(
135+
True,
136+
COMPLETE_REASONING_WITH_THINK,
137+
id="complete_reasoning_with_think_streaming",
138+
),
139+
pytest.param(
140+
False,
141+
MULTIPLE_LINES_WITH_THINK,
142+
id="multiple_lines_with_think",
143+
),
144+
pytest.param(
145+
True,
146+
MULTIPLE_LINES_WITH_THINK,
147+
id="multiple_lines_with_think_streaming",
148+
),
99149
]
100150

101151

Diff for: vllm/entrypoints/openai/reasoning_parsers/deepseek_r1_reasoning_parser.py

+35-23
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,8 @@ def extract_reasoning_content_streaming(
6767
]):
6868
return None
6969

70+
# Check if <think> is present in previous or delta.
71+
# Keep compatibility with models that don't generate <think> tokens.
7072
if self.think_start_token_id in previous_token_ids:
7173
if self.think_end_token_id in delta_token_ids:
7274
# <think> in previous, </think> in delta,
@@ -85,7 +87,6 @@ def extract_reasoning_content_streaming(
8587
# reasoning content continues
8688
return DeltaMessage(reasoning_content=delta_text)
8789
elif self.think_start_token_id in delta_token_ids:
88-
logger.info(delta_text)
8990
if self.think_end_token_id in delta_token_ids:
9091
# <think> in delta, </think> in delta, extract reasoning content
9192
start_index = delta_text.find(self.think_start_token)
@@ -101,35 +102,46 @@ def extract_reasoning_content_streaming(
101102
# reasoning content continues
102103
return DeltaMessage(reasoning_content=delta_text)
103104
else:
104-
# No <think> in previous or delta, reasoning content continues.
105-
return DeltaMessage(content=delta_text)
105+
# No <think> in previous or delta, also need to check for </think>.
106+
# Because the model may have generated </think> without <think>
107+
# Ref https://huggingface.co/deepseek-ai/DeepSeek-R1/commit/8a58a132790c9935686eb97f042afa8013451c9f
108+
if self.think_end_token_id in delta_token_ids:
109+
# </think> in delta with more tokens,
110+
# extract reasoning content and content
111+
end_index = delta_text.find(self.think_end_token)
112+
reasoning_content = delta_text[:end_index]
113+
content = delta_text[end_index + len(self.think_end_token):]
114+
return DeltaMessage(reasoning_content=reasoning_content,
115+
content=content if content else None)
116+
elif self.think_end_token_id in previous_token_ids:
117+
# </think> in previous, thinking content ends
118+
return DeltaMessage(content=delta_text)
119+
else:
120+
# no </think> in previous or delta, reasoning content continues
121+
return DeltaMessage(reasoning_content=delta_text)
106122

107123
def extract_reasoning_content(
108124
self, model_output: str, request: ChatCompletionRequest
109125
) -> Tuple[Optional[str], Optional[str]]:
110126

111-
# Check if the model output contains the <think> tokens.
112-
if (self.think_start_token not in model_output
113-
or self.think_end_token not in model_output):
127+
# DeepSeek R1 doesn't generate <think> now.
128+
# Thus we assume the reasoning content is always at the start.
129+
# Ref https://huggingface.co/deepseek-ai/DeepSeek-R1/commit/8a58a132790c9935686eb97f042afa8013451c9f
130+
if self.think_end_token not in model_output:
114131
return None, model_output
115132
else:
133+
# Add a start token if it's missing to keep compatibility.
134+
if self.think_start_token not in model_output:
135+
model_output = f"{self.think_start_token}{model_output}"
116136
# Use a regex to find the reasoning content
117137
reasoning_content = self.reasoning_regex.findall(model_output)[0]
118138

119-
# Remove the reasoning content from the model output
120-
# Although deepseek's <think> token is always at the
121-
# beginning of the line, we cannot guarantee that the
122-
# other models will follow this convention.
123-
# Therefore, we need to add :start_index.
124-
start_index = model_output.find(self.think_start_token)
125-
if start_index != -1:
126-
end_index = start_index + len(
127-
f"{self.think_start_token}{reasoning_content}{self.think_end_token}"
128-
)
129-
model_output = model_output[:start_index] + \
130-
model_output[end_index:]
131-
132-
if len(model_output) == 0:
133-
return reasoning_content, None
134-
135-
return reasoning_content, model_output
139+
end_index = len(
140+
f"{self.think_start_token}{reasoning_content}{self.think_end_token}"
141+
)
142+
final_output = model_output[end_index:]
143+
144+
if len(final_output) == 0:
145+
return reasoning_content, None
146+
147+
return reasoning_content, final_output

Comments (0)