Commit fc6485d (parent: 78a141d)

[Bugfix]: Reasoning output bug according to the chat template change (#13025)

Signed-off-by: Ce Gao <[email protected]>

File tree: 3 files changed, +129 -45 lines
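Background: the chat template change referenced in the title (the DeepSeek-R1 HF commit linked in the code comments below) moves the opening <think> tag into the prompt itself, so the model's generated text begins directly inside the reasoning block and only the closing </think> appears in the output. A rough illustration of the resulting shapes (abbreviated, not the full template):

    # Before the template change, both tags appeared in the generation:
    #   "<think>step-by-step reasoning</think>final answer"
    # After it, <think> is part of the prompt, so the generation is just:
    #   "step-by-step reasoning</think>final answer"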

Diff for: examples/online_serving/openai_chat_completion_with_reasoning.py

+4 -4

@@ -36,8 +36,8 @@
 reasoning_content = response.choices[0].message.reasoning_content
 content = response.choices[0].message.content
 
-print("reasoning_content:", reasoning_content)
-print("content:", content)
+print("reasoning_content for Round 1:", reasoning_content)
+print("content for Round 1:", content)
 
 # Round 2
 messages.append({"role": "assistant", "content": content})
@@ -50,5 +50,5 @@
 reasoning_content = response.choices[0].message.reasoning_content
 content = response.choices[0].message.content
 
-print("reasoning_content:", reasoning_content)
-print("content:", content)
+print("reasoning_content for Round 2:", reasoning_content)
+print("content for Round 2:", content)

Diff for: tests/entrypoints/openai/reasoning_parsers/test_deepseekr1_reasoning_parser.py

+90 -18

@@ -15,32 +15,62 @@
 end_token = "</think>"
 
 SIMPLE_REASONING = {
-    "output": "<think>This is a reasoning section</think>This is the rest",
+    "output": "This is a reasoning section</think>This is the rest",
     "reasoning_content": "This is a reasoning section",
     "content": "This is the rest",
 }
 COMPLETE_REASONING = {
-    "output": "<think>This is a reasoning section</think>",
+    "output": "This is a reasoning section</think>",
     "reasoning_content": "This is a reasoning section",
     "content": None,
 }
 NO_REASONING = {
-    "output": "This is a reasoning section",
+    "output": "This is content",
     "reasoning_content": None,
-    "content": "This is a reasoning section",
+    "content": "This is content",
+}
+NO_REASONING_STREAMING = {
+    "output": "This is a reasoning section",
+    "reasoning_content": "This is a reasoning section",
+    "content": None,
 }
 MULTIPLE_LINES = {
-    "output": "<think>This\nThat</think>This is the rest\nThat",
+    "output": "This\nThat</think>This is the rest\nThat",
     "reasoning_content": "This\nThat",
     "content": "This is the rest\nThat",
 }
 SHORTEST_REASONING_NO_STREAMING = {
-    "output": "<think></think>This is the rest",
+    "output": "</think>This is the rest",
     "reasoning_content": "",
     "content": "This is the rest",
 }
 SHORTEST_REASONING = {
-    "output": "<think></think>This is the rest",
+    "output": "</think>This is the rest",
+    "reasoning_content": None,
+    "content": "This is the rest",
+}
+REASONING_WITH_THINK = {
+    "output": "<think>This is a reasoning section</think>This is the rest",
+    "reasoning_content": "This is a reasoning section",
+    "content": "This is the rest",
+}
+COMPLETE_REASONING_WITH_THINK = {
+    "output": "<think>This is a reasoning section</think>",
+    "reasoning_content": "This is a reasoning section",
+    "content": None,
+}
+MULTIPLE_LINES_WITH_THINK = {
+    "output": "<think>This\nThat</think>This is the rest\nThat",
+    "reasoning_content": "This\nThat",
+    "content": "This is the rest\nThat",
+}
+SHORTEST_REASONING_NO_STREAMING_WITH_THINK = {
+    "output": "</think>This is the rest",
+    "reasoning_content": "",
+    "content": "This is the rest",
+}
+SHORTEST_REASONING_WITH_THINK = {
+    "output": "</think>This is the rest",
     "reasoning_content": None,
     "content": "This is the rest",
 }
@@ -49,37 +79,37 @@
     pytest.param(
         False,
         SIMPLE_REASONING,
-        id="simple_streaming",
+        id="simple_reasoning",
     ),
     pytest.param(
         True,
         SIMPLE_REASONING,
-        id="simple_streaming",
+        id="simple_reasoning_streaming",
     ),
     pytest.param(
         False,
         COMPLETE_REASONING,
-        id="complete_streaming",
+        id="complete_reasoning",
     ),
     pytest.param(
         True,
         COMPLETE_REASONING,
-        id="complete_streaming",
+        id="complete_reasoning_streaming",
    ),
     pytest.param(
         False,
         NO_REASONING,
-        id="no_streaming",
+        id="no_reasoning_token",
     ),
     pytest.param(
         True,
-        NO_REASONING,
-        id="no_streaming",
+        NO_REASONING_STREAMING,
+        id="no_reasoning_token_streaming",
     ),
     pytest.param(
         False,
         MULTIPLE_LINES,
-        id="multiple_lines_streaming",
+        id="multiple_lines",
     ),
     pytest.param(
         True,
@@ -89,23 +119,65 @@
     pytest.param(
         True,
         SHORTEST_REASONING,
-        id="shortest_streaming",
+        id="shortest",
     ),
     pytest.param(
         False,
         SHORTEST_REASONING_NO_STREAMING,
         id="shortest_streaming",
     ),
+    pytest.param(
+        False,
+        REASONING_WITH_THINK,
+        id="reasoning_with_think",
+    ),
+    pytest.param(
+        True,
+        REASONING_WITH_THINK,
+        id="reasoning_with_think_streaming",
+    ),
+    pytest.param(
+        False,
+        COMPLETE_REASONING_WITH_THINK,
+        id="complete_reasoning_with_think",
+    ),
+    pytest.param(
+        True,
+        COMPLETE_REASONING_WITH_THINK,
+        id="complete_reasoning_with_think_streaming",
+    ),
+    pytest.param(
+        False,
+        MULTIPLE_LINES_WITH_THINK,
+        id="multiple_lines_with_think",
+    ),
+    pytest.param(
+        True,
+        MULTIPLE_LINES_WITH_THINK,
+        id="multiple_lines_with_think_streaming",
+    ),
+    pytest.param(
+        False,
+        SHORTEST_REASONING_NO_STREAMING_WITH_THINK,
+        id="shortest_with_think",
+    ),
+    pytest.param(
+        True,
+        SHORTEST_REASONING_WITH_THINK,
+        id="shortest_with_think_streaming",
+    ),
 ]
 
+# Global tokenizer initialization to avoid repeated loading
+tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
+tokenizer.add_tokens([start_token, end_token])
+
 
 @pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
 def test_reasoning(
     streaming: bool,
     param_dict: dict,
 ):
-    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
-    tokenizer.add_tokens([start_token, end_token])
     output = tokenizer.tokenize(param_dict["output"])
     # decode everything to tokens
     output_tokens: List[str] = [
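One asymmetry worth noting among the new cases: for output containing no </think> at all, the non-streaming parser (NO_REASONING) sees the complete output and returns it all as content, while the streaming parser (NO_REASONING_STREAMING) cannot know whether </think> is still coming, so it keeps emitting the text as reasoning_content. The test file can be run in the usual pytest way, using the path from the diff header:

    pytest tests/entrypoints/openai/reasoning_parsers/test_deepseekr1_reasoning_parser.py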

Diff for: vllm/entrypoints/openai/reasoning_parsers/deepseek_r1_reasoning_parser.py

+35 -23

@@ -67,6 +67,8 @@ def extract_reasoning_content_streaming(
         ]):
             return None
 
+        # Check if <think> is present in previous or delta.
+        # Keep compatibility with models that don't generate <think> tokens.
         if self.think_start_token_id in previous_token_ids:
             if self.think_end_token_id in delta_token_ids:
                 # <think> in previous, </think> in delta,
@@ -85,7 +87,6 @@ def extract_reasoning_content_streaming(
                 # reasoning content continues
                 return DeltaMessage(reasoning_content=delta_text)
         elif self.think_start_token_id in delta_token_ids:
-            logger.info(delta_text)
             if self.think_end_token_id in delta_token_ids:
                 # <think> in delta, </think> in delta, extract reasoning content
                 start_index = delta_text.find(self.think_start_token)
@@ -101,35 +102,46 @@ def extract_reasoning_content_streaming(
                 # reasoning content continues
                 return DeltaMessage(reasoning_content=delta_text)
         else:
-            # No <think> in previous or delta, reasoning content continues.
-            return DeltaMessage(content=delta_text)
+            # No <think> in previous or delta; still check for </think>,
+            # because the model may generate </think> without <think>.
+            # Ref https://huggingface.co/deepseek-ai/DeepSeek-R1/commit/8a58a132790c9935686eb97f042afa8013451c9f
+            if self.think_end_token_id in delta_token_ids:
+                # </think> in delta with more tokens,
+                # extract reasoning content and content
+                end_index = delta_text.find(self.think_end_token)
+                reasoning_content = delta_text[:end_index]
+                content = delta_text[end_index + len(self.think_end_token):]
+                return DeltaMessage(reasoning_content=reasoning_content,
+                                    content=content if content else None)
+            elif self.think_end_token_id in previous_token_ids:
+                # </think> in previous, thinking content ends
+                return DeltaMessage(content=delta_text)
+            else:
+                # No </think> in previous or delta, reasoning content continues.
+                return DeltaMessage(reasoning_content=delta_text)
 
     def extract_reasoning_content(
             self, model_output: str, request: ChatCompletionRequest
     ) -> Tuple[Optional[str], Optional[str]]:
 
-        # Check if the model output contains the <think> tokens.
-        if (self.think_start_token not in model_output
-                or self.think_end_token not in model_output):
+        # DeepSeek R1 no longer emits the <think> token itself, so we
+        # assume the reasoning content always starts at the beginning.
+        # Ref https://huggingface.co/deepseek-ai/DeepSeek-R1/commit/8a58a132790c9935686eb97f042afa8013451c9f
+        if self.think_end_token not in model_output:
             return None, model_output
         else:
+            # Add a start token if it is missing to keep compatibility.
+            if self.think_start_token not in model_output:
+                model_output = f"{self.think_start_token}{model_output}"
             # Use a regex to find the reasoning content
             reasoning_content = self.reasoning_regex.findall(model_output)[0]
 
-            # Remove the reasoning content from the model output.
-            # Although deepseek's <think> token is always at the
-            # beginning of the line, we cannot guarantee that
-            # other models will follow this convention.
-            # Therefore, we need to add :start_index.
-            start_index = model_output.find(self.think_start_token)
-            if start_index != -1:
-                end_index = start_index + len(
-                    f"{self.think_start_token}{reasoning_content}{self.think_end_token}"
-                )
-                model_output = model_output[:start_index] + \
-                    model_output[end_index:]
-
-                if len(model_output) == 0:
-                    return reasoning_content, None
-
-            return reasoning_content, model_output
+            end_index = len(
+                f"{self.think_start_token}{reasoning_content}{self.think_end_token}"
+            )
+            final_output = model_output[end_index:]
+
+            if len(final_output) == 0:
+                return reasoning_content, None
+
+            return reasoning_content, final_output
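To see the fixed non-streaming behavior in isolation, here is a self-contained sketch mirroring the new extract_reasoning_content logic above (constants and regex are recreated here for illustration; the real parser is a class that also handles token IDs for streaming):

    import re
    from typing import Optional, Tuple

    THINK_START = "<think>"
    THINK_END = "</think>"
    # Mirrors the parser's reasoning_regex: everything between the two tags.
    REASONING_REGEX = re.compile(
        rf"{re.escape(THINK_START)}(.*?){re.escape(THINK_END)}", re.DOTALL)

    def extract_reasoning_content(
            model_output: str) -> Tuple[Optional[str], Optional[str]]:
        # No closing tag: the model produced no (finished) reasoning block,
        # so the whole output is treated as plain content.
        if THINK_END not in model_output:
            return None, model_output
        # The new template puts <think> in the prompt, so prepend it here
        # to keep the regex-based extraction working.
        if THINK_START not in model_output:
            model_output = f"{THINK_START}{model_output}"
        reasoning_content = REASONING_REGEX.findall(model_output)[0]
        end_index = len(f"{THINK_START}{reasoning_content}{THINK_END}")
        final_output = model_output[end_index:]
        return reasoning_content, final_output or None

    # Matches the SIMPLE_REASONING test case above:
    assert extract_reasoning_content(
        "This is a reasoning section</think>This is the rest"
    ) == ("This is a reasoning section", "This is the rest")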
