Skip to content

Commit 4b8d434

Browse files
robertgshaw2-redhat and mzusman
authored and committed
[Frontend] Improve StreamingResponse Exception Handling (vllm-project#11752)
1 parent 314b405 commit 4b8d434

File tree

2 files changed

+3
-3
lines changed

2 files changed

+3
-3
lines changed

vllm/entrypoints/openai/serving_chat.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -301,7 +301,7 @@ async def chat_completion_stream_generator(
301301
] * num_choices
302302
else:
303303
tool_parsers = [None] * num_choices
304-
except RuntimeError as e:
304+
except Exception as e:
305305
logger.exception("Error in tool parser creation.")
306306
data = self.create_streaming_error_response(str(e))
307307
yield f"data: {data}\n\n"
@@ -591,7 +591,7 @@ async def chat_completion_stream_generator(
591591
completion_tokens=num_completion_tokens,
592592
total_tokens=num_prompt_tokens + num_completion_tokens)
593593

594-
except ValueError as e:
594+
except Exception as e:
595595
# TODO: Use a vllm-specific Validation Error
596596
logger.exception("Error in chat completion stream generator.")
597597
data = self.create_streaming_error_response(str(e))

vllm/entrypoints/openai/serving_completion.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -371,7 +371,7 @@ async def completion_stream_generator(
371371
# report to FastAPI middleware aggregate usage across all choices
372372
request_metadata.final_usage_info = final_usage_info
373373

374-
except ValueError as e:
374+
except Exception as e:
375375
# TODO: Use a vllm-specific Validation Error
376376
data = self.create_streaming_error_response(str(e))
377377
yield f"data: {data}\n\n"

0 commit comments

Comments
 (0)