
Commit a9ff7d0

refactor

1 parent a82424a commit a9ff7d0

File tree

6 files changed: +35 -228 lines

helpers/env-variables.ts

Lines changed: 14 additions & 14 deletions
@@ -486,26 +486,26 @@ It\\'s cute animal.
   return systemPromptEnv;
 };
 
-const getTemplateEnvs = (template?: TemplateType): EnvVar[] => {
+const getTemplateEnvs = (
+  template?: TemplateType,
+  framework?: TemplateFramework,
+): EnvVar[] => {
   const nextQuestionEnvs: EnvVar[] = [
-    {
-      name: "NEXT_QUESTION_ENABLE",
-      description: "Whether to show next question suggestions",
-      value: "true",
-    },
     {
       name: "NEXT_QUESTION_PROMPT",
       description: `Customize prompt to generate the next question suggestions based on the conversation history.
-Default prompt is:
-NEXT_QUESTION_PROMPT=# You're a helpful assistant! Your task is to suggest the next question that user might ask.
-# Here is the conversation history
-# ---------------------\n{conversation}\n---------------------
-# Given the conversation history, please give me 3 questions that you might ask next!
-`,
+Disable this prompt to disable the next question suggestions feature.`,
+      value: `"You're a helpful assistant! Your task is to suggest the next question that user might ask.
+Here is the conversation history
+---------------------\n{conversation}\n---------------------
+Given the conversation history, please give me 3 questions that you might ask next!"`,
     },
   ];
 
-  if (template === "multiagent" || template === "streaming") {
+  if (
+    framework === "fastapi" &&
+    (template === "multiagent" || template === "streaming")
+  ) {
     return nextQuestionEnvs;
   }
   return [];
@@ -555,7 +555,7 @@ export const createBackendEnvFile = async (
     ...getVectorDBEnvs(opts.vectorDb, opts.framework),
     ...getFrameworkEnvs(opts.framework, opts.externalPort),
     ...getToolEnvs(opts.tools),
-    ...getTemplateEnvs(opts.template),
+    ...getTemplateEnvs(opts.template, opts.framework),
     ...getObservabilityEnvs(opts.observability),
     ...getSystemPromptEnv(opts.tools, opts.dataSources, opts.framework),
   ];
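
Note: with this refactor, NEXT_QUESTION_ENABLE is gone; setting or unsetting NEXT_QUESTION_PROMPT is now the only switch for the feature, and the variable is only emitted for the FastAPI multiagent and streaming templates. A minimal sketch of the new on/off contract as seen from the Python side (the helper name is hypothetical, not part of the commit):

    import os

    def next_question_feature_enabled() -> bool:
        # Hypothetical helper, not in the commit: the presence of a
        # non-empty NEXT_QUESTION_PROMPT is itself the feature switch.
        return bool(os.getenv("NEXT_QUESTION_PROMPT"))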

templates/components/services/python/suggestion.py

Lines changed: 12 additions & 25 deletions
@@ -1,51 +1,38 @@
 import logging
+import os
 from typing import List, Optional
 
+from app.api.routers.models import Message
 from llama_index.core.prompts import PromptTemplate
 from llama_index.core.settings import Settings
 from pydantic import BaseModel
-from pydantic_settings import BaseSettings, SettingsConfigDict
-
-from app.api.routers.models import Message
 
 logger = logging.getLogger("uvicorn")
 
 
-class NextQuestionSettings(BaseSettings):
-    enable: bool = True
-    prompt_template: str = (
-        "You're a helpful assistant! Your task is to suggest the next question that user might ask. "
-        "\nHere is the conversation history"
-        "\n---------------------\n{conversation}\n---------------------"
-        "Given the conversation history, please give me 3 questions that you might ask next!"
-    )
-
-    model_config = SettingsConfigDict(env_prefix="NEXT_QUESTION_")
-
-    @property
-    def prompt(self) -> PromptTemplate:
-        return PromptTemplate(self.prompt_template)
-
-
-next_question_settings = NextQuestionSettings()
-
-
 class NextQuestions(BaseModel):
     """A list of questions that user might ask next"""
 
     questions: List[str]
 
 
 class NextQuestionSuggestion:
-    @staticmethod
+
+    @classmethod
+    def get_configured_prompt(cls) -> Optional[str]:
+        return os.getenv("NEXT_QUESTION_PROMPT", None)
+
+    @classmethod
     async def suggest_next_questions(
+        cls,
         messages: List[Message],
     ) -> Optional[List[str]]:
        """
        Suggest the next questions that user might ask based on the conversation history
        Return None if suggestion is disabled or there is an error
        """
-        if not next_question_settings.enable:
+        prompt_template = cls.get_configured_prompt()
+        if not prompt_template:
            return None
 
        try:
@@ -63,7 +50,7 @@ async def suggest_next_questions(
 
             output: NextQuestions = await Settings.llm.astructured_predict(
                 NextQuestions,
-                prompt=next_question_settings.prompt,
+                prompt=PromptTemplate(prompt_template),
                 conversation=conversation,
             )
 
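
For context, a minimal usage sketch of the refactored service. This is illustrative scaffolding, not part of the commit: it assumes the template app is importable, Settings.llm is configured, and Message accepts role/content keyword arguments; only NextQuestionSuggestion and the import paths come from this diff.

    import asyncio

    from app.api.routers.models import Message
    from app.api.services.suggestion import NextQuestionSuggestion

    async def main() -> None:
        history = [Message(role="user", content="What is RAG?")]
        # Returns None when NEXT_QUESTION_PROMPT is unset/empty or when
        # the structured prediction fails.
        questions = await NextQuestionSuggestion.suggest_next_questions(history)
        print(questions or "Next-question suggestions are disabled.")

    asyncio.run(main())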

templates/types/multiagent/fastapi/app/api/routers/vercel_response.py

Lines changed: 6 additions & 7 deletions
@@ -1,16 +1,15 @@
-from asyncio import Task
 import json
 import logging
+from asyncio import Task
 from typing import AsyncGenerator
 
 from aiostream import stream
+from app.agents.single import AgentRunEvent, AgentRunResult
+from app.api.routers.models import ChatData, Message
+from app.api.services.suggestion import NextQuestionSuggestion
 from fastapi import Request
 from fastapi.responses import StreamingResponse
 
-from app.api.routers.models import ChatData, Message
-from app.agents.single import AgentRunEvent, AgentRunResult
-from app.api.services.suggestion import NextQuestionSuggestion, next_question_settings
-
 logger = logging.getLogger("uvicorn")
 
 
@@ -69,8 +68,8 @@ async def _chat_response_generator():
                 final_response += token.delta
                 yield VercelStreamResponse.convert_text(token.delta)
 
-            # Generate questions that user might be interested in
-            if next_question_settings.enable:
+            # Generate next questions if next question prompt is configured
+            if NextQuestionSuggestion.get_configured_prompt() is not None:
                 conversation = chat_data.messages + [
                     Message(role="assistant", content=final_response)
                 ]

templates/types/streaming/fastapi/app/api/routers/vercel_response.py

Lines changed: 3 additions & 3 deletions
@@ -7,7 +7,7 @@
 
 from app.api.routers.events import EventCallbackHandler
 from app.api.routers.models import ChatData, Message, SourceNodes
-from app.api.services.suggestion import NextQuestionSuggestion, next_question_settings
+from app.api.services.suggestion import NextQuestionSuggestion
 
 
 class VercelStreamResponse(StreamingResponse):
@@ -56,8 +56,8 @@ async def _chat_response_generator():
                 final_response += token
                 yield VercelStreamResponse.convert_text(token)
 
-                # Generate questions that user might be interested in
-                if next_question_settings.enable:
+                # Generate next questions if next question prompt is configured
+                if NextQuestionSuggestion.get_configured_prompt() is not None:
                     conversation = chat_data.messages + [
                         Message(role="assistant", content=final_response)
                     ]
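
Both routers now share the same gate. One subtlety worth noting (illustrative only, not part of the commit): the routers check "is not None" while the service checks truthiness, so an explicitly empty NEXT_QUESTION_PROMPT passes the router gate but still produces no suggestions.

    import os

    # Illustrative only, not in the commit: an explicitly empty prompt.
    os.environ["NEXT_QUESTION_PROMPT"] = ""
    prompt = os.getenv("NEXT_QUESTION_PROMPT", None)
    print(prompt is not None)  # True  -> the router gate would still run
    print(not prompt)          # True  -> the service returns None anyway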

templates/types/streaming/fastapi/app/api/services/file.py

Lines changed: 0 additions & 119 deletions
This file was deleted.

templates/types/streaming/fastapi/app/api/services/suggestion.py

Lines changed: 0 additions & 60 deletions
This file was deleted.
