Skip to content

Commit 224d413

Browse files
committed
Merge remote-tracking branch 'origin/main' into fix/fix-llamacloud-env
2 parents c71a6e8 + 88220f1 commit 224d413

File tree

7 files changed

+42
-24
lines changed

7 files changed

+42
-24
lines changed

.changeset/selfish-lemons-kiss.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"create-llama": patch
3+
---
4+
5+
Fix: the workflow does not stop when the user presses the stop-generation button

.changeset/modern-lemons-applaud.md renamed to .changeset/tiny-items-divide.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,4 +2,4 @@
22
"create-llama": patch
33
---
44

5-
bump: use LlamaIndexTS 0.6.18
5+

CHANGELOG.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,12 @@
11
# create-llama
22

3+
## 0.2.17
4+
5+
### Patch Changes
6+
7+
- cd3fcd0: bump: use LlamaIndexTS 0.6.18
8+
- 6335de1: Fix: the LlamaCloud selector did not use the values configured in the environment (Python)
9+
310
## 0.2.16
411

512
### Patch Changes

package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "create-llama",
3-
"version": "0.2.16",
3+
"version": "0.2.17",
44
"description": "Create LlamaIndex-powered apps with one command",
55
"keywords": [
66
"rag",

templates/components/multiagent/python/app/api/routers/chat.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
import logging
22

3-
from app.api.routers.events import EventCallbackHandler
43
from app.api.routers.models import (
54
ChatData,
65
)
@@ -23,7 +22,6 @@ async def chat(
2322
last_message_content = data.get_last_message_content()
2423
messages = data.get_history_messages(include_agent_messages=True)
2524

26-
event_handler = EventCallbackHandler()
2725
# The chat API supports passing private document filters and chat params
2826
# but agent workflow does not support them yet
2927
# ignore chat params and use all documents for now

templates/components/multiagent/python/app/api/routers/vercel_response.py

Lines changed: 27 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1+
import asyncio
12
import json
23
import logging
3-
from abc import ABC
44
from typing import AsyncGenerator, List
55

66
from aiostream import stream
@@ -13,7 +13,7 @@
1313
logger = logging.getLogger("uvicorn")
1414

1515

16-
class VercelStreamResponse(StreamingResponse, ABC):
16+
class VercelStreamResponse(StreamingResponse):
1717
"""
1818
Base class to convert the response from the chat engine to the streaming format expected by Vercel
1919
"""
@@ -23,26 +23,34 @@ class VercelStreamResponse(StreamingResponse, ABC):
2323

2424
def __init__(self, request: Request, chat_data: ChatData, *args, **kwargs):
2525
self.request = request
26-
27-
stream = self._create_stream(request, chat_data, *args, **kwargs)
28-
content = self.content_generator(stream)
29-
26+
self.chat_data = chat_data
27+
content = self.content_generator(*args, **kwargs)
3028
super().__init__(content=content)
3129

32-
async def content_generator(self, stream):
30+
async def content_generator(self, event_handler, events):
31+
logger.info("Starting content_generator")
32+
stream = self._create_stream(
33+
self.request, self.chat_data, event_handler, events
34+
)
3335
is_stream_started = False
34-
35-
async with stream.stream() as streamer:
36-
async for output in streamer:
37-
if not is_stream_started:
38-
is_stream_started = True
39-
# Stream a blank message to start the stream
40-
yield self.convert_text("")
41-
42-
yield output
43-
44-
if await self.request.is_disconnected():
45-
break
36+
try:
37+
async with stream.stream() as streamer:
38+
async for output in streamer:
39+
if not is_stream_started:
40+
is_stream_started = True
41+
# Stream a blank message to start the stream
42+
yield self.convert_text("")
43+
44+
yield output
45+
except asyncio.CancelledError:
46+
logger.info("Stopping workflow")
47+
await event_handler.cancel_run()
48+
except Exception as e:
49+
logger.error(
50+
f"Unexpected error in content_generator: {str(e)}", exc_info=True
51+
)
52+
finally:
53+
logger.info("The stream has been stopped!")
4654

4755
def _create_stream(
4856
self,

templates/types/streaming/fastapi/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ uvicorn = { extras = ["standard"], version = "^0.23.2" }
1515
python-dotenv = "^1.0.0"
1616
aiostream = "^0.5.2"
1717
cachetools = "^5.3.3"
18-
llama-index = "0.11.6"
18+
llama-index = "^0.11.17"
1919

2020
[tool.poetry.group.dev.dependencies]
2121
mypy = "^1.8.0"

0 commit comments

Comments
 (0)