
Commit d9547da

updated
1 parent: eba1717

6 files changed (+21 additions, -21 deletions)

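Every hunk below applies the same change: each handler previously caught only ValueError (or RuntimeError, for tool-parser creation) and let any other exception propagate out of the request path; after this commit it catches Exception, so any failure is converted into an OpenAI-style error response. A minimal sketch of the non-streaming pattern follows; the handler body and the module-level create_error_response helper are hypothetical stand-ins for illustration, not vLLM's actual classes or signatures.

# Minimal sketch of the pattern this commit applies. The handler and the
# helper below are hypothetical stand-ins, not vLLM's actual code.
import logging

logger = logging.getLogger(__name__)


def create_error_response(message: str) -> dict:
    # Stand-in for the create_error_response method called in the diff:
    # wrap the failure in an OpenAI-style error payload.
    return {"error": {"message": message, "type": "BadRequestError"}}


async def create_completion(request: dict) -> dict:
    try:
        prompt = request["prompt"]          # preprocessing stand-in
        return {"text": f"echo: {prompt}"}
    except Exception as e:                  # was: except ValueError as e:
        # Any failure, not just ValueError, now becomes a client-visible
        # error response instead of an unhandled server exception.
        logger.exception("Error in preprocessing prompt inputs")
        return create_error_response(str(e))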

vllm/entrypoints/openai/serving_chat.py

Lines changed: 7 additions & 7 deletions
@@ -171,7 +171,7 @@ async def create_chat_completion(
                 truncate_prompt_tokens=request.truncate_prompt_tokens,
                 add_special_tokens=request.add_special_tokens,
             )
-        except ValueError as e:
+        except Exception as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))

@@ -228,7 +228,7 @@ async def create_chat_completion(
                 )

                 generators.append(generator)
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))

@@ -245,7 +245,7 @@ async def create_chat_completion(
             return await self.chat_completion_full_generator(
                 request, result_generator, request_id, model_name,
                 conversation, tokenizer, request_metadata)
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))

@@ -301,7 +301,7 @@ async def chat_completion_stream_generator(
                 ] * num_choices
             else:
                 tool_parsers = [None] * num_choices
-        except RuntimeError as e:
+        except Exception as e:
             logger.exception("Error in tool parser creation.")
             data = self.create_streaming_error_response(str(e))
             yield f"data: {data}\n\n"

@@ -591,7 +591,7 @@ async def chat_completion_stream_generator(
                     completion_tokens=num_completion_tokens,
                     total_tokens=num_prompt_tokens + num_completion_tokens)

-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             logger.exception("Error in chat completion stream generator.")
             data = self.create_streaming_error_response(str(e))

@@ -618,7 +618,7 @@ async def chat_completion_full_generator(
                 final_res = res
         except asyncio.CancelledError:
             return self.create_error_response("Client disconnected")
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))

@@ -682,7 +682,7 @@ async def chat_completion_full_generator(

         try:
             tool_parser = self.tool_parser(tokenizer)
-        except RuntimeError as e:
+        except Exception as e:
             logger.exception("Error in tool parser creation.")
             return self.create_error_response(str(e))
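The streaming hunks above differ from the non-streaming ones: once the server-sent-event response has begun, the generator can no longer return an HTTP error object, so the exception is serialized and emitted as a final data: event. A sketch of that flow, again with hypothetical stand-ins for the diff's create_streaming_error_response; the "[DONE]" sentinel follows the usual OpenAI streaming convention.

# Sketch of the streaming error path; helpers are hypothetical stand-ins.
import json
from typing import AsyncIterator


def create_streaming_error_response(message: str) -> str:
    # Stand-in for the method called in the diff: serialize the error as
    # the JSON payload of a server-sent event.
    return json.dumps(
        {"error": {"message": message, "type": "InternalServerError"}})


async def chat_completion_stream_generator(chunks) -> AsyncIterator[str]:
    try:
        async for chunk in chunks:
            yield f"data: {json.dumps(chunk)}\n\n"
    except Exception as e:  # was: except ValueError/RuntimeError as e:
        # Mid-stream there is no HTTP status left to change, so the error
        # rides the stream as one last data event.
        data = create_streaming_error_response(str(e))
        yield f"data: {data}\n\n"
    yield "data: [DONE]\n\n"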

vllm/entrypoints/openai/serving_completion.py

Lines changed: 4 additions & 4 deletions
@@ -106,7 +106,7 @@ async def create_completion(
                 truncate_prompt_tokens=request.truncate_prompt_tokens,
                 add_special_tokens=request.add_special_tokens,
             )
-        except ValueError as e:
+        except Exception as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))

@@ -158,7 +158,7 @@ async def create_completion(
                 )

                 generators.append(generator)
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))

@@ -215,7 +215,7 @@ async def create_completion(
                 )
         except asyncio.CancelledError:
             return self.create_error_response("Client disconnected")
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))

@@ -371,7 +371,7 @@ async def completion_stream_generator(
             # report to FastAPI middleware aggregate usage across all choices
             request_metadata.final_usage_info = final_usage_info

-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             data = self.create_streaming_error_response(str(e))
             yield f"data: {data}\n\n"

vllm/entrypoints/openai/serving_embedding.py

Lines changed: 3 additions & 3 deletions
@@ -136,7 +136,7 @@ async def create_embedding(
                 truncate_prompt_tokens=truncate_prompt_tokens,
                 add_special_tokens=request.add_special_tokens,
             )
-        except ValueError as e:
+        except Exception as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))

@@ -167,7 +167,7 @@ async def create_embedding(
                 )

                 generators.append(generator)
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))

@@ -196,7 +196,7 @@ async def create_embedding(
                 )
         except asyncio.CancelledError:
             return self.create_error_response("Client disconnected")
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))

vllm/entrypoints/openai/serving_pooling.py

Lines changed: 3 additions & 3 deletions
@@ -132,7 +132,7 @@ async def create_pooling(
                 truncate_prompt_tokens=truncate_prompt_tokens,
                 add_special_tokens=request.add_special_tokens,
             )
-        except ValueError as e:
+        except Exception as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))

@@ -163,7 +163,7 @@ async def create_pooling(
                 )

                 generators.append(generator)
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))

@@ -192,7 +192,7 @@ async def create_pooling(
                 )
         except asyncio.CancelledError:
             return self.create_error_response("Client disconnected")
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))

vllm/entrypoints/openai/serving_score.py

Lines changed: 3 additions & 3 deletions
@@ -101,7 +101,7 @@ async def create_score(
             if not self.model_config.is_cross_encoder:
                 raise ValueError("Model is not cross encoder.")

-        except ValueError as e:
+        except Exception as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))

@@ -155,7 +155,7 @@ async def create_score(
                 )

                 generators.append(generator)
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))

@@ -184,7 +184,7 @@ async def create_score(
                 )
         except asyncio.CancelledError:
             return self.create_error_response("Client disconnected")
-        except ValueError as e:
+        except Exception as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))
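Note that the explicit raise ValueError("Model is not cross encoder.") inside the first hunk's try block is still handled after this change, since ValueError is a subclass of Exception; the broader clause only widens coverage to other failure types. A quick self-contained check:

# ValueError subclasses Exception, so the explicit raise above is still
# caught after the change; only the coverage widens.
try:
    raise ValueError("Model is not cross encoder.")
except Exception as e:
    print(f"caught: {e}")  # prints: caught: Model is not cross encoder.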

vllm/entrypoints/openai/serving_tokenization.py

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ async def create_tokenize(
                 request.prompt,
                 add_special_tokens=request.add_special_tokens,
             )
-        except ValueError as e:
+        except Exception as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))
