
Commit 4fdfca5

Refactor artifact workflows and UI components
- Updated `code_workflow.py` and `document_workflow.py` to improve chat history handling and user message storage.
- Enhanced `ArtifactWorkflow` to utilize optional fields in the `Requirement` model.
- Revised prompt instructions for clarity and conciseness in generating requirements.
- Modified UI event components to reflect changes in workflow stages and improve user feedback.
- Improved error handling for JSON parsing in artifact annotations.
1 parent f975b79 commit 4fdfca5
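For orientation, the planner's output contract after this commit looks like the sketch below. It is reconstructed from the `code_workflow.py` diff further down; the field names, types, and defaults come from the diff, while the module layout is illustrative.

```python
from typing import Literal, Optional

from pydantic import BaseModel


class Requirement(BaseModel):
    # The planner now chooses between answering the user and writing code.
    next_step: Literal["answering", "coding"]
    # Only meaningful when next_step == "coding"; may be null in the plan.
    language: Optional[str] = None
    file_name: Optional[str] = None
    requirement: str


class UIEventData(BaseModel):
    # Stages rendered by components/ui_event.jsx.
    state: Literal["plan", "generate", "completed"]
    # Defaults to None, so the "completed" event can omit it.
    requirement: Optional[str] = None
```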

File tree: 4 files changed (+116, -108 lines)

llama-index-server/examples/artifact/code_workflow.py

Lines changed: 97 additions & 77 deletions
@@ -1,6 +1,6 @@
 import re
 import time
-from typing import Any, Literal, Optional
+from typing import Any, Literal, Optional, Union

 from pydantic import BaseModel

@@ -27,8 +27,9 @@


 class Requirement(BaseModel):
-    language: str
-    file_name: str
+    next_step: Literal["answering", "coding"]
+    language: Optional[str] = None
+    file_name: Optional[str] = None
     requirement: str

@@ -42,13 +43,12 @@ class GenerateArtifactEvent(Event):


 class SynthesizeAnswerEvent(Event):
-    requirement: Requirement
-    generated_artifact: str
+    pass


 class UIEventData(BaseModel):
     state: Literal["plan", "generate", "completed"]
-    requirement: Optional[str]
+    requirement: Optional[str] = None


 class ArtifactWorkflow(Workflow):
@@ -80,13 +80,19 @@ async def prepare_chat_history(self, ctx: Context, ev: StartEvent) -> PlanEvent:
         user_msg = ev.user_msg
         if user_msg is None:
             raise ValueError("user_msg is required to run the workflow")
+        await ctx.set("user_msg", user_msg)
         chat_history = ev.chat_history or []
+        chat_history.append(
+            ChatMessage(
+                role="user",
+                content=user_msg,
+            )
+        )
         if self.last_artifact:
             chat_history.append(
                 ChatMessage(
                     role="user",
-                    content="Here is the current artifact: \n"
-                    + self.last_artifact.model_dump_json(),
+                    content=f"The previous {self.last_artifact.type.value} is: \n{self.last_artifact.model_dump_json()}",
                 )
             )
         memory = ChatMemoryBuffer.from_defaults(
@@ -96,14 +102,18 @@ async def prepare_chat_history(self, ctx: Context, ev: StartEvent) -> PlanEvent:
         await ctx.set("memory", memory)
         return PlanEvent(
             user_msg=user_msg,
-            context=str(self.last_artifact.data) if self.last_artifact else "",
+            context=str(self.last_artifact.model_dump_json())
+            if self.last_artifact
+            else "",
         )

     @step
-    async def planning(self, ctx: Context, event: PlanEvent) -> GenerateArtifactEvent:
+    async def planning(
+        self, ctx: Context, event: PlanEvent
+    ) -> Union[GenerateArtifactEvent, SynthesizeAnswerEvent]:
         """
         Based on the conversation history and the user's request
-        this step will help to provide a good next step for the code/document generation.
+        this step will help to provide a good next step for the code or document generation.
         """
         ctx.write_event_to_stream(
             UIEvent(
@@ -115,50 +125,63 @@ async def planning(self, ctx: Context, event: PlanEvent) -> GenerateArtifactEven
             )
         )
         prompt = PromptTemplate("""
-        You are a product analyst who takes responsibility for analyzing the user request and provide requirements for code/document generation.
-        Follow these instructions:
-        1. Carefully analyze the conversation history and the user's request to see what has been done and what is the next step.
-        2. From the user's request, provide requirements for the next step of the code/document generation.
-        3. Don't be verbose, only return the requirements for the next step of the code/document generation.
-        4. Only the following languages are allowed: "typescript", "python".
-        5. Request should be in the format of:
-        ```json
-        {
-            "language": string,
-            "file_name": string,
-            "requirement": string
-        }
-        ```
+        You are a product analyst responsible for analyzing the user's request and providing the next step for code or document generation.
+        You are helping user with their code artifact. To update the code, you need to plan a coding step.
+
+        Follow these instructions:
+        1. Carefully analyze the conversation history and the user's request to determine what has been done and what the next step should be.
+        2. The next step must be one of the following two options:
+           - "coding": To make the changes to the current code.
+           - "answering": If you don't need to update the current code or need clarification from the user.
+           Important: Avoid telling the user to update the code themselves, you are the one who will update the code (by planning a coding step).
+        3. If the next step is "coding", you may specify the language ("typescript" or "python") and file_name if known, otherwise set them to null.
+        4. The requirement must be provided clearly what is the user request and what need to be done for the next step in details
+           as precise and specific as possible, don't be stingy with in the requirement.
+        5. If the next step is "answering", set language and file_name to null, and the requirement should describe what to answer or explain to the user.
+        6. Be concise; only return the requirements for the next step.
+        7. The requirements must be in the following format:
+        ```json
+        {
+            "next_step": "answering" | "coding",
+            "language": "typescript" | "python" | null,
+            "file_name": string | null,
+            "requirement": string
+        }
+        ```

-        ## Example:
-        User request: Create a calculator app.
-        You should return:
-        ```json
-        {
-            "language": "typescript",
-            "file_name": "calculator.tsx",
-            "requirement": "Generate code for a calculator app that: Has a simple UI with a display and button layout. The display will show the current input and the result. The button should have basic operators, number, clear, and equals. The calculation should work correctly."
-        }
-        ```
+        ## Example 1:
+        User request: Create a calculator app.
+        You should return:
+        ```json
+        {
+            "next_step": "coding",
+            "language": "typescript",
+            "file_name": "calculator.tsx",
+            "requirement": "Generate code for a calculator app that has a simple UI with a display and button layout. The display should show the current input and the result. The buttons should include basic operators, numbers, clear, and equals. The calculation should work correctly."
+        }
+        ```

-        User request: Add light/dark mode toggle to the calculator app.
-        You should return:
-        ```json
-        {
-            "language": "typescript",
-            "file_name": "calculator.tsx",
-            "requirement": "On top of the existing code, add a light/dark mode toggle at the top right corner of the calculator app. Handle the state of the toggle in the component."
-        }
-        ```
+        ## Example 2:
+        User request: Explain how the game loop works.
+        Context: You have already generated the code for a snake game.
+        You should return:
+        ```json
+        {
+            "next_step": "answering",
+            "language": null,
+            "file_name": null,
+            "requirement": "The user is asking about the game loop. Explain how the game loop works."
+        }
+        ```

-        {context}
+        {context}

-        Now, i have to planning for the user's request:
-        {user_msg}
+        Now, plan the user's next step for this request:
+        {user_msg}
         """).format(
             context=""
             if event.context is None
-            else f"## The context are: \n{event.context}\n",
+            else f"## The context is: \n{event.context}\n",
             user_msg=event.user_msg,
         )
         response = await self.llm.acomplete(
@@ -167,20 +190,13 @@ async def planning(self, ctx: Context, event: PlanEvent) -> GenerateArtifactEven
         )
         # parse the response to Requirement
         # 1. use regex to find the json block
-        json_block = re.search(r"```json([\s\S]*)```", response.text)
+        json_block = re.search(
+            r"```(?:json)?\s*([\s\S]*?)\s*```", response.text, re.IGNORECASE
+        )
         if json_block is None:
-            raise ValueError("No json block found in the response")
+            raise ValueError("No JSON block found in the response.")
         # 2. parse the json block to Requirement
         requirement = Requirement.model_validate_json(json_block.group(1).strip())
-
-        # Put the planning result to the memory
-        memory: ChatMemoryBuffer = await ctx.get("memory")
-        memory.put(
-            ChatMessage(
-                role="assistant",
-                content=f"Planning for the code generation: \n{response.text}",
-            )
-        )
         ctx.write_event_to_stream(
             UIEvent(
                 type="ui_event",
@@ -190,9 +206,21 @@ async def planning(self, ctx: Context, event: PlanEvent) -> GenerateArtifactEven
                 ),
             )
         )
-        return GenerateArtifactEvent(
-            requirement=requirement,
+        # Put the planning result to the memory
+        # useful for answering step
+        memory: ChatMemoryBuffer = await ctx.get("memory")
+        memory.put(
+            ChatMessage(
+                role="assistant",
+                content=f"The plan for next step: \n{response.text}",
+            )
         )
+        if requirement.next_step == "coding":
+            return GenerateArtifactEvent(
+                requirement=requirement,
+            )
+        else:
+            return SynthesizeAnswerEvent()

     @step
     async def generate_artifact(
@@ -271,19 +299,15 @@ async def generate_artifact(
         language_pattern = r"```(\w+)([\s\S]*)```"
         code_match = re.search(language_pattern, response.text)
         if code_match is None:
-            return SynthesizeAnswerEvent(
-                requirement=event.requirement,
-                generated_artifact="There is no code to update. "
-                + response.text.strip(),
-            )
+            return SynthesizeAnswerEvent()
         else:
             code = code_match.group(2).strip()
         # Put the generated code to the memory
         memory: ChatMemoryBuffer = await ctx.get("memory")
         memory.put(
             ChatMessage(
                 role="assistant",
-                content=f"Generated code: \n{response.text}",
+                content=f"Updated the code: \n{response.text}",
             )
         )
         # To show the Canvas panel for the artifact
@@ -294,17 +318,14 @@ async def generate_artifact(
                     type=ArtifactType.CODE,
                     created_at=int(time.time()),
                     data=CodeArtifactData(
-                        language=event.requirement.language,
-                        file_name=event.requirement.file_name,
+                        language=event.requirement.language or "",
+                        file_name=event.requirement.file_name or "",
                         code=code,
                     ),
                 ),
             )
         )
-        return SynthesizeAnswerEvent(
-            requirement=event.requirement,
-            generated_artifact=response.text,
-        )
+        return SynthesizeAnswerEvent()

     @step
     async def synthesize_answer(
@@ -319,9 +340,9 @@ async def synthesize_answer(
             ChatMessage(
                 role="system",
                 content="""
-                Your responsibility is to explain the work to the user.
-                If there is no code to update, explain the reason.
-                If the code is updated, just summarize what changed. Don't need to include the whole code again in the response.
+                You are a helpful assistant who is responsible for explaining the work to the user.
+                Based on the conversation history, provide an answer to the user's question.
+                The user has access to the code so avoid mentioning the whole code again in your response.
                 """,
             )
         )
@@ -333,7 +354,6 @@ async def synthesize_answer(
                 type="ui_event",
                 data=UIEventData(
                     state="completed",
-                    requirement=event.requirement.requirement,
                 ),
             )
         )
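The planning step above now accepts a fenced JSON block with or without the `json` tag and routes on `next_step`. Below is a minimal, self-contained sketch of that parsing and routing logic; the `Requirement` model mirrors the diff, while the sample response text and the print statements are invented for illustration.

```python
import re
from typing import Literal, Optional

from pydantic import BaseModel


class Requirement(BaseModel):
    next_step: Literal["answering", "coding"]
    language: Optional[str] = None
    file_name: Optional[str] = None
    requirement: str


# Invented LLM response. Note the fence has no "json" tag, which the old
# pattern r"```json([\s\S]*)```" would have rejected.
fence = "`" * 3
response_text = (
    "Here is the plan:\n"
    f"{fence}\n"
    '{"next_step": "coding", "language": "python", "file_name": "snake.py", '
    '"requirement": "Add a pause key to the game loop."}\n'
    f"{fence}\n"
)

# Same pattern as the new code: optional "json" tag, non-greedy body.
json_block = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", response_text, re.IGNORECASE)
if json_block is None:
    raise ValueError("No JSON block found in the response.")
requirement = Requirement.model_validate_json(json_block.group(1).strip())

# The workflow routes on next_step: "coding" leads to GenerateArtifactEvent,
# anything else to SynthesizeAnswerEvent.
if requirement.next_step == "coding":
    print(f"generate {requirement.file_name}: {requirement.requirement}")
else:
    print(f"answer: {requirement.requirement}")
```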

llama-index-server/examples/artifact/components/ui_event.jsx

Lines changed: 5 additions & 8 deletions
@@ -4,23 +4,21 @@ import { Progress } from "@/components/ui/progress";
 import { Skeleton } from "@/components/ui/skeleton";
 import { cn } from "@/lib/utils";
 import { Markdown } from "@llamaindex/chat-ui/widgets";
-import { Hammer, ListChecks, Loader2 } from "lucide-react";
+import { ListChecks, Loader2, Wand2 } from "lucide-react";
 import { useEffect, useState } from "react";

 const STAGE_META = {
   plan: {
     icon: ListChecks,
-    label: "Planning",
-    badgeText: "Step 1/2: Requirements",
+    badgeText: "Step 1/2: Planning",
     gradient: "from-blue-100 via-blue-50 to-white",
     progress: 33,
     iconBg: "bg-blue-100 text-blue-600",
     badge: "bg-blue-100 text-blue-700",
   },
   generate: {
-    icon: Hammer,
-    label: "Generating",
-    badgeText: "Step 2/2: Building",
+    icon: Wand2,
+    badgeText: "Step 2/2: Generating",
     gradient: "from-violet-100 via-violet-50 to-white",
     progress: 66,
     iconBg: "bg-violet-100 text-violet-600",
@@ -72,7 +70,6 @@ function ArtifactWorkflowCard({ event }) {
             <meta.icon className="w-5 h-5" />
           </div>
           <CardTitle className="text-base font-semibold flex items-center gap-2">
-            <span>{meta.label}</span>
             <Badge className={cn("ml-1", meta.badge, "text-xs px-2 py-0.5")}>
               {meta.badgeText}
             </Badge>
@@ -93,7 +90,7 @@ function ArtifactWorkflowCard({ event }) {
             <div className="flex items-center gap-1">
               <Loader2 className="animate-spin text-violet-400 w-4 h-4" />
               <span className="text-violet-900 font-medium text-sm">
-                Generating for the requirement:
+                Working on the requirement:
               </span>
             </div>
             <div className="rounded-lg border border-violet-200 bg-violet-50 px-2 py-1 max-h-24 overflow-auto text-xs">
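For reference, the card above renders whichever of the three `UIEventData` states the workflow streams. A hypothetical sequence of `ui_event` payloads is sketched below; the values are illustrative, and only the field names and the `plan`/`generate`/`completed` states come from the diffs.

```python
# Illustrative only: the shape of the ui_event stream this card consumes.
plan_event = {"type": "ui_event", "data": {"state": "plan", "requirement": None}}
generate_event = {
    "type": "ui_event",
    "data": {"state": "generate", "requirement": "Add a pause key to the game loop."},
}
# After this commit, the "completed" event no longer carries the requirement text.
completed_event = {"type": "ui_event", "data": {"state": "completed"}}
```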

llama-index-server/examples/artifact/document_workflow.py

Lines changed: 10 additions & 1 deletion
@@ -78,7 +78,14 @@ async def prepare_chat_history(self, ctx: Context, ev: StartEvent) -> PlanEvent:
         user_msg = ev.user_msg
         if user_msg is None:
             raise ValueError("user_msg is required to run the workflow")
+        await ctx.set("user_msg", user_msg)
         chat_history = ev.chat_history or []
+        chat_history.append(
+            ChatMessage(
+                role="user",
+                content=user_msg,
+            )
+        )
         if self.last_artifact:
             chat_history.append(
                 ChatMessage(
@@ -94,7 +101,9 @@ async def prepare_chat_history(self, ctx: Context, ev: StartEvent) -> PlanEvent:
         await ctx.set("memory", memory)
         return PlanEvent(
             user_msg=user_msg,
-            context=str(self.last_artifact.data) if self.last_artifact else "",
+            context=str(self.last_artifact.model_dump_json())
+            if self.last_artifact
+            else "",
         )

     @step
@step
