@@ -4,8 +4,8 @@
 
 from app.api.callbacks.llamacloud import LlamaCloudFileDownload
 from app.api.callbacks.next_question import SuggestNextQuestions
-from app.api.callbacks.stream_handler import StreamHandler
 from app.api.callbacks.source_nodes import AddNodeUrl
+from app.api.callbacks.stream_handler import StreamHandler
 from app.api.routers.models import (
     ChatData,
 )
@@ -50,8 +50,41 @@ async def chat(
             ],
         ).vercel_stream()
     except Exception as e:
-        logger.exception("Error in chat engine", exc_info=True)
+        logger.exception("Error in chat", exc_info=True)
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"Error in chat: {e}",
+        ) from e
+
+
+# non-streaming endpoint - delete if not needed
+@r.post("/request")
+async def chat_request(
+    request: Request,
+    data: ChatData,
+):
+    try:
+        last_message_content = data.get_last_message_content()
+        messages = data.get_history_messages(include_agent_messages=True)
+
+        doc_ids = data.get_chat_document_ids()
+        filters = generate_filters(doc_ids)
+        params = data.data or {}
+
+        workflow = create_workflow(
+            params=params,
+            filters=filters,
+        )
+
+        handler = workflow.run(
+            user_msg=last_message_content,
+            chat_history=messages,
+            stream=False,
+        )
+        return await handler
+    except Exception as e:
+        logger.exception("Error in chat request", exc_info=True)
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Error in chat engine: {e}",
+            detail=f"Error in chat request: {e}",
         ) from e
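As a usage sketch, here is how a client might call the new non-streaming endpoint. The URL prefix and the payload shape are assumptions: the diff only shows the route registered as `@r.post("/request")` on router `r`, so the `/api/chat` mount point and the Vercel-style `messages` body below are hypothetical and should be adjusted to match how the router is actually wired into the app.

    # Hypothetical client call for the non-streaming /request endpoint.
    # The /api/chat prefix and the messages payload shape are assumptions
    # about the surrounding app, not shown in the diff.
    import requests

    response = requests.post(
        "http://localhost:8000/api/chat/request",
        json={
            "messages": [
                {"role": "user", "content": "Summarize the uploaded documents."}
            ]
        },
        timeout=60,
    )
    response.raise_for_status()
    print(response.json())  # the complete answer arrives in one response

Unlike the streaming endpoint, which wraps the workflow handler in a StreamHandler and returns `vercel_stream()`, this endpoint runs the workflow with `stream=False` and awaits the final result in one shot, which is why its comment marks it as safe to delete if only streaming is needed.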