
Commit 37fd59d

refactor: clean code
1 parent 1c91107 commit 37fd59d

4 files changed: +80 additions, -45 deletions


templates/components/services/python/suggestion.py

Lines changed: 13 additions & 1 deletion
@@ -24,7 +24,7 @@ def get_configured_prompt(cls) -> Optional[str]:
         return PromptTemplate(prompt)
 
     @classmethod
-    async def suggest_next_questions(
+    async def suggest_next_questions_all_messages(
         cls,
         messages: List[Message],
     ) -> Optional[List[str]]:
@@ -64,3 +64,15 @@ def _extract_questions(cls, text: str) -> List[str]:
         content_match = re.search(r"```(.*?)```", text, re.DOTALL)
         content = content_match.group(1) if content_match else ""
         return content.strip().split("\n")
+
+    @classmethod
+    async def suggest_next_questions(
+        cls,
+        chat_history: List[Message],
+        response: str,
+    ) -> List[str]:
+        """
+        Suggest the next questions that user might ask based on the chat history and the last response
+        """
+        messages = chat_history + [Message(role="assistant", content=response)]
+        return await cls.suggest_next_questions_all_messages(messages)
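The renamed suggest_next_questions_all_messages keeps the original behavior, while the new suggest_next_questions wrapper appends the assistant's final response before delegating. A minimal sketch of that relationship, using a stand-in Message dataclass rather than the app's real chat model:

from dataclasses import dataclass
from typing import List


@dataclass
class Message:
    role: str
    content: str


def to_all_messages(chat_history: List[Message], response: str) -> List[Message]:
    # Mirrors the wrapper: append the final assistant turn, then hand the
    # full message list to the all-messages variant.
    return chat_history + [Message(role="assistant", content=response)]


history = [Message(role="user", content="What is RAG?")]
print(to_all_messages(history, "RAG combines retrieval with generation."))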

templates/types/multiagent/fastapi/app/api/routers/vercel_response.py

Lines changed: 31 additions & 26 deletions
@@ -1,7 +1,7 @@
 import json
 import logging
 from asyncio import Task
-from typing import AsyncGenerator
+from typing import AsyncGenerator, List
 
 from aiostream import stream
 from app.agents.single import AgentRunEvent, AgentRunResult
@@ -61,38 +61,31 @@ async def _chat_response_generator():
 
             if isinstance(result, AgentRunResult):
                 for token in result.response.message.content:
-                    yield VercelStreamResponse.convert_text(token)
+                    final_response += token
+                    yield cls.convert_text(token)
 
             if isinstance(result, AsyncGenerator):
                 async for token in result:
                     final_response += token.delta
-                    yield VercelStreamResponse.convert_text(token.delta)
+                    yield cls.convert_text(token.delta)
 
             # Generate next questions if next question prompt is configured
-            if NextQuestionSuggestion.get_configured_prompt() is not None:
-                conversation = chat_data.messages + [
-                    Message(role="assistant", content=final_response)
-                ]
-                questions = await NextQuestionSuggestion.suggest_next_questions(
-                    conversation
-                )
-                if questions:
-                    yield VercelStreamResponse.convert_data(
-                        {
-                            "type": "suggested_questions",
-                            "data": questions,
-                        }
-                    )
+            question_data = await cls._generate_next_questions(
+                chat_data.messages, final_response
+            )
+            if question_data:
+                yield cls.convert_data(question_data)
+
             # TODO: stream sources
 
         # Yield the events from the event handler
         async def _event_generator():
             async for event in events():
-                event_response = _event_to_response(event)
+                event_response = cls._event_to_response(event)
                 if verbose:
                     logger.debug(event_response)
                 if event_response is not None:
-                    yield VercelStreamResponse.convert_data(event_response)
+                    yield cls.convert_data(event_response)
 
         combine = stream.merge(_chat_response_generator(), _event_generator())
 
@@ -101,16 +94,28 @@ async def _event_generator():
             if not is_stream_started:
                 is_stream_started = True
                 # Stream a blank message to start the stream
-                yield VercelStreamResponse.convert_text("")
+                yield cls.convert_text("")
 
             async for output in streamer:
                 yield output
                 if await request.is_disconnected():
                     break
 
-
-def _event_to_response(event: AgentRunEvent) -> dict:
-    return {
-        "type": "agent",
-        "data": {"agent": event.name, "text": event.msg},
-    }
+    @staticmethod
+    def _event_to_response(event: AgentRunEvent) -> dict:
+        return {
+            "type": "agent",
+            "data": {"agent": event.name, "text": event.msg},
+        }
+
+    @staticmethod
+    async def _generate_next_questions(chat_history: List[Message], response: str):
+        questions = await NextQuestionSuggestion.suggest_next_questions(
+            chat_history, response
+        )
+        if questions:
+            return {
+                "type": "suggested_questions",
+                "data": questions,
+            }
+        return None
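Both vercel_response.py changes follow the same shape: the inline suggested-questions block becomes a _generate_next_questions helper that returns either a payload dict or None, so the streaming generator yields only when there is something to send. A self-contained sketch of that pattern (the fake_ names are stand-ins, not the app's real code):

import asyncio
import json
from typing import List, Optional


async def fake_suggest_next_questions(history: List[str], response: str) -> List[str]:
    # Stand-in for NextQuestionSuggestion.suggest_next_questions().
    return ["What are the trade-offs?"] if response else []


async def generate_next_questions(history: List[str], response: str) -> Optional[dict]:
    questions = await fake_suggest_next_questions(history, response)
    if questions:
        return {"type": "suggested_questions", "data": questions}
    return None  # nothing to suggest; the caller simply skips the yield


async def chat_response_generator(history: List[str], response: str):
    question_data = await generate_next_questions(history, response)
    if question_data:
        yield json.dumps(question_data)


async def main():
    async for chunk in chat_response_generator(["hi"], "hello!"):
        print(chunk)


asyncio.run(main())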

templates/types/multiagent/fastapi/app/api/services/suggestion.py

Lines changed: 15 additions & 1 deletion
@@ -26,7 +26,7 @@ class NextQuestions(BaseModel):
 
 class NextQuestionSuggestion:
     @staticmethod
-    async def suggest_next_questions(
+    async def suggest_next_questions_all_messages(
         messages: List[Message],
         number_of_questions: int = N_QUESTION_TO_GENERATE,
     ) -> List[str]:
@@ -58,3 +58,17 @@ async def suggest_next_questions(
         except Exception as e:
             logger.error(f"Error when generating next question: {e}")
             return []
+
+    @staticmethod
+    async def suggest_next_questions(
+        chat_history: List[Message],
+        response: str,
+        number_of_questions: int = N_QUESTION_TO_GENERATE,
+    ) -> List[str]:
+        """
+        Suggest the next questions that user might ask based on the chat history and the last response
+        """
+        messages = chat_history + [Message(role="assistant", content=response)]
+        return await NextQuestionSuggestion.suggest_next_questions_all_messages(
+            messages, number_of_questions
+        )
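Unlike the component-template version, this service-layer wrapper also forwards number_of_questions. A hypothetical call site (the import path follows the file's location in the fastapi template above; handle_turn and history are illustrative):

from app.api.services.suggestion import NextQuestionSuggestion


async def handle_turn(history, final_response: str) -> list:
    # number_of_questions defaults to N_QUESTION_TO_GENERATE; callers can
    # override it per request.
    return await NextQuestionSuggestion.suggest_next_questions(
        history, final_response, number_of_questions=5
    )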

templates/types/streaming/fastapi/app/api/routers/vercel_response.py

Lines changed: 21 additions & 17 deletions
@@ -1,4 +1,5 @@
 import json
+from typing import List
 
 from aiostream import stream
 from fastapi import Request
@@ -54,23 +55,14 @@ async def _chat_response_generator():
             final_response = ""
             async for token in response.async_response_gen():
                 final_response += token
-                yield VercelStreamResponse.convert_text(token)
+                yield cls.convert_text(token)
 
             # Generate next questions if next question prompt is configured
-            if NextQuestionSuggestion.get_configured_prompt() is not None:
-                conversation = chat_data.messages + [
-                    Message(role="assistant", content=final_response)
-                ]
-                questions = await NextQuestionSuggestion.suggest_next_questions(
-                    conversation
-                )
-                if questions:
-                    yield VercelStreamResponse.convert_data(
-                        {
-                            "type": "suggested_questions",
-                            "data": questions,
-                        }
-                    )
+            question_data = await cls._generate_next_questions(
+                chat_data.messages, final_response
+            )
+            if question_data:
+                yield cls.convert_data(question_data)
 
             # the text_generator is the leading stream, once it's finished, also finish the event stream
             event_handler.is_done = True
@@ -93,7 +85,7 @@ async def _event_generator():
             async for event in event_handler.async_event_gen():
                 event_response = event.to_response()
                 if event_response is not None:
-                    yield VercelStreamResponse.convert_data(event_response)
+                    yield cls.convert_data(event_response)
 
         combine = stream.merge(_chat_response_generator(), _event_generator())
         is_stream_started = False
@@ -102,9 +94,21 @@ async def _event_generator():
                 if not is_stream_started:
                     is_stream_started = True
                     # Stream a blank message to start the stream
-                    yield VercelStreamResponse.convert_text("")
+                    yield cls.convert_text("")
 
                 yield output
 
                 if await request.is_disconnected():
                     break
+
+    @staticmethod
+    async def _generate_next_questions(chat_history: List[Message], response: str):
+        questions = await NextQuestionSuggestion.suggest_next_questions(
+            chat_history, response
+        )
+        if questions:
+            return {
+                "type": "suggested_questions",
+                "data": questions,
+            }
+        return None
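A side effect of replacing the hard-coded VercelStreamResponse.convert_text calls with cls.convert_text in both routers: inside a classmethod, cls is the class the method was invoked on, so subclasses that override the converters are respected. A toy illustration of that dispatch (the prefix formats here are made up, not the actual Vercel stream protocol):

class Base:
    @classmethod
    def convert_text(cls, token: str) -> str:
        return f"0:{token}\n"

    @classmethod
    def emit(cls, token: str) -> str:
        # Dispatches through cls, so Custom.emit() picks up the override.
        return cls.convert_text(token)


class Custom(Base):
    @classmethod
    def convert_text(cls, token: str) -> str:
        return f"custom:{token}\n"


print(Base.emit("hi"), end="")    # 0:hi
print(Custom.emit("hi"), end="")  # custom:hi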
