
Commit ef070c0

feat: support multi agent for ts (#300)
Co-authored-by: Marcus Schiesser <[email protected]>

1 parent 70f7dca

File tree: 15 files changed, +638 −191 lines

.changeset/yellow-jokes-protect.md

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Add multi agents template for Typescript

e2e/multiagent_template.spec.ts

Lines changed: 6 additions & 6 deletions
@@ -10,19 +10,19 @@ import type {
 } from "../helpers";
 import { createTestDir, runCreateLlama, type AppType } from "./utils";
 
-const templateFramework: TemplateFramework = "fastapi";
+const templateFramework: TemplateFramework = process.env.FRAMEWORK
+  ? (process.env.FRAMEWORK as TemplateFramework)
+  : "fastapi";
 const dataSource: string = "--example-file";
 const templateUI: TemplateUI = "shadcn";
 const templatePostInstallAction: TemplatePostInstallAction = "runApp";
-const appType: AppType = "--frontend";
+const appType: AppType = templateFramework === "nextjs" ? "" : "--frontend";
 const userMessage = "Write a blog post about physical standards for letters";
 
 test.describe(`Test multiagent template ${templateFramework} ${dataSource} ${templateUI} ${appType} ${templatePostInstallAction}`, async () => {
   test.skip(
-    process.platform !== "linux" ||
-      process.env.FRAMEWORK !== "fastapi" ||
-      process.env.DATASOURCE === "--no-files",
-    "The multiagent template currently only works with FastAPI and files. We also only run on Linux to speed up tests.",
+    process.platform !== "linux" || process.env.DATASOURCE === "--no-files",
+    "The multiagent template currently only works with files. We also only run on Linux to speed up tests.",
   );
   let port: number;
   let externalPort: number;

helpers/typescript.ts

Lines changed: 30 additions & 2 deletions
@@ -33,8 +33,7 @@ export const installTSTemplate = async ({
    * Copy the template files to the target directory.
    */
   console.log("\nInitializing project with template:", template, "\n");
-  const type = template === "multiagent" ? "streaming" : template; // use nextjs streaming template for multiagent
-  const templatePath = path.join(templatesDir, "types", type, framework);
+  const templatePath = path.join(templatesDir, "types", "streaming", framework);
   const copySource = ["**"];
 
   await copy(copySource, root, {
@@ -124,6 +123,30 @@ export const installTSTemplate = async ({
     cwd: path.join(compPath, "vectordbs", "typescript", vectorDb ?? "none"),
   });
 
+  if (template === "multiagent") {
+    const multiagentPath = path.join(compPath, "multiagent", "typescript");
+
+    // copy workflow code for multiagent template
+    await copy("**", path.join(root, relativeEngineDestPath, "workflow"), {
+      parents: true,
+      cwd: path.join(multiagentPath, "workflow"),
+    });
+
+    if (framework === "nextjs") {
+      // patch route.ts file
+      await copy("**", path.join(root, relativeEngineDestPath), {
+        parents: true,
+        cwd: path.join(multiagentPath, "nextjs"),
+      });
+    } else if (framework === "express") {
+      // patch chat.controller.ts file
+      await copy("**", path.join(root, relativeEngineDestPath), {
+        parents: true,
+        cwd: path.join(multiagentPath, "express"),
+      });
+    }
+  }
+
   // copy loader component (TS only supports llama_parse and file for now)
   const loaderFolder = useLlamaParse ? "llama_parse" : "file";
   await copy("**", enginePath, {
@@ -145,6 +168,11 @@ export const installTSTemplate = async ({
     cwd: path.join(compPath, "engines", "typescript", engine),
   });
 
+  // copy settings to engine folder
+  await copy("**", enginePath, {
+    cwd: path.join(compPath, "settings", "typescript"),
+  });
+
   /**
    * Copy the selected UI files to the target directory and reference it.
    */
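
For orientation, a sketch of the multiagent copy step in isolation. The `copy` call matches the glob-based helper this file already uses; the example values in the comments (e.g. "app/api/chat" as the Next.js destination) are assumptions, not part of this diff:

import path from "path";
// the same glob-based copy helper used throughout helpers/typescript.ts
import { copy } from "./copy";

async function copyMultiagentWorkflow(
  compPath: string, // the templates components directory (assumed)
  root: string, // root of the project being scaffolded
  relativeEngineDestPath: string, // e.g. "app/api/chat" for Next.js (assumed)
) {
  const multiagentPath = path.join(compPath, "multiagent", "typescript");
  // parents: true keeps subdirectories below cwd intact, so the workflow
  // sources land at <root>/<relativeEngineDestPath>/workflow/...
  await copy("**", path.join(root, relativeEngineDestPath, "workflow"), {
    parents: true,
    cwd: path.join(multiagentPath, "workflow"),
  });
}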

questions.ts

Lines changed: 1 addition & 4 deletions
@@ -410,10 +410,7 @@ export const askQuestions = async (
     return; // early return - no further questions needed for llamapack projects
   }
 
-  if (program.template === "multiagent") {
-    // TODO: multi-agents currently only supports FastAPI
-    program.framework = preferences.framework = "fastapi";
-  } else if (program.template === "extractor") {
+  if (program.template === "extractor") {
     // Extractor template only supports FastAPI, empty data sources, and llamacloud
     // So we just use example file for extractor template, this allows user to choose vector database later
     program.dataSources = [EXAMPLE_FILE];
Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
+import { StopEvent } from "@llamaindex/core/workflow";
+import { Message, streamToResponse } from "ai";
+import { Request, Response } from "express";
+import { ChatMessage, ChatResponseChunk } from "llamaindex";
+import { createWorkflow } from "./workflow/factory";
+import { toDataStream, workflowEventsToStreamData } from "./workflow/stream";
+
+export const chat = async (req: Request, res: Response) => {
+  try {
+    const { messages }: { messages: Message[] } = req.body;
+    const userMessage = messages.pop();
+    if (!messages || !userMessage || userMessage.role !== "user") {
+      return res.status(400).json({
+        error:
+          "messages are required in the request body and the last message must be from the user",
+      });
+    }
+
+    const chatHistory = messages as ChatMessage[];
+    const agent = createWorkflow(chatHistory);
+    const result = agent.run<AsyncGenerator<ChatResponseChunk>>(
+      userMessage.content,
+    ) as unknown as Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>;
+
+    // convert the workflow events to a vercel AI stream data object
+    const agentStreamData = await workflowEventsToStreamData(
+      agent.streamEvents(),
+    );
+    // convert the workflow result to a vercel AI content stream
+    const stream = toDataStream(result, {
+      onFinal: () => agentStreamData.close(),
+    });
+
+    return streamToResponse(stream, res, {}, agentStreamData);
+  } catch (error) {
+    console.error("[LlamaIndex]", error);
+    return res.status(500).json({
+      detail: (error as Error).message,
+    });
+  }
+};
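
This controller is what the Express branch of helpers/typescript.ts copies in as the chat.controller.ts patch. A minimal sketch of how such a controller is typically wired up; the import path, route, and port are assumptions, not part of this diff:

import express from "express";
import { chat } from "./src/controllers/chat.controller"; // hypothetical path

const app = express();
// The controller reads req.body, so a JSON body parser must run first.
app.use(express.json());
// Streams the multiagent workflow output back to the chat client.
app.post("/api/chat", chat);
app.listen(Number(process.env.PORT ?? 8000));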
Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
+import { initObservability } from "@/app/observability";
+import { StopEvent } from "@llamaindex/core/workflow";
+import { Message, StreamingTextResponse } from "ai";
+import { ChatMessage, ChatResponseChunk } from "llamaindex";
+import { NextRequest, NextResponse } from "next/server";
+import { initSettings } from "./engine/settings";
+import { createWorkflow } from "./workflow/factory";
+import { toDataStream, workflowEventsToStreamData } from "./workflow/stream";
+
+initObservability();
+initSettings();
+
+export const runtime = "nodejs";
+export const dynamic = "force-dynamic";
+
+export async function POST(request: NextRequest) {
+  try {
+    const body = await request.json();
+    const { messages }: { messages: Message[] } = body;
+    const userMessage = messages.pop();
+    if (!messages || !userMessage || userMessage.role !== "user") {
+      return NextResponse.json(
+        {
+          error:
+            "messages are required in the request body and the last message must be from the user",
+        },
+        { status: 400 },
+      );
+    }
+
+    const chatHistory = messages as ChatMessage[];
+    const agent = createWorkflow(chatHistory);
+    // TODO: fix type in agent.run in LITS
+    const result = agent.run<AsyncGenerator<ChatResponseChunk>>(
+      userMessage.content,
+    ) as unknown as Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>;
+    // convert the workflow events to a vercel AI stream data object
+    const agentStreamData = await workflowEventsToStreamData(
+      agent.streamEvents(),
+    );
+    // convert the workflow result to a vercel AI content stream
+    const stream = toDataStream(result, {
+      onFinal: () => agentStreamData.close(),
+    });
+    return new StreamingTextResponse(stream, {}, agentStreamData);
+  } catch (error) {
+    console.error("[LlamaIndex]", error);
+    return NextResponse.json(
+      {
+        detail: (error as Error).message,
+      },
+      {
+        status: 500,
+      },
+    );
+  }
+}
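
Assuming this file is copied in as the app-router route.ts patch (the comment in helpers/typescript.ts calls it exactly that), here is a minimal client-side sketch of consuming the streamed response; the /api/chat path follows the app-router convention and is an assumption:

// Minimal client sketch: send a chat and read the streamed reply.
const response = await fetch("/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [{ role: "user", content: "Write a blog post about letters" }],
  }),
});
// The body is a Vercel AI text stream; read it incrementally.
const reader = response.body!.getReader();
const decoder = new TextDecoder();
for (;;) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log(decoder.decode(value));
}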
Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+import { ChatMessage, QueryEngineTool } from "llamaindex";
+import { getDataSource } from "../engine";
+import { FunctionCallingAgent } from "./single-agent";
+
+const getQueryEngineTool = async () => {
+  const index = await getDataSource();
+  if (!index) {
+    throw new Error(
+      "StorageContext is empty - call 'npm run generate' to generate the storage first.",
+    );
+  }
+
+  const topK = process.env.TOP_K ? parseInt(process.env.TOP_K) : undefined;
+  return new QueryEngineTool({
+    queryEngine: index.asQueryEngine({
+      similarityTopK: topK,
+    }),
+    metadata: {
+      name: "query_index",
+      description: `Use this tool to retrieve information about the text corpus from the index.`,
+    },
+  });
+};
+
+export const createResearcher = async (chatHistory: ChatMessage[]) => {
+  return new FunctionCallingAgent({
+    name: "researcher",
+    tools: [await getQueryEngineTool()],
+    systemPrompt:
+      "You are a researcher agent. You are given a researching task. You must use your tools to complete the research.",
+    chatHistory,
+  });
+};
+
+export const createWriter = (chatHistory: ChatMessage[]) => {
+  return new FunctionCallingAgent({
+    name: "writer",
+    systemPrompt:
+      "You are an expert in writing blog posts. You are given a task to write a blog post. Don't make up any information yourself.",
+    chatHistory,
+  });
+};
+
+export const createReviewer = (chatHistory: ChatMessage[]) => {
+  return new FunctionCallingAgent({
+    name: "reviewer",
+    systemPrompt:
+      "You are an expert in reviewing blog posts. You are given a task to review a blog post. Review the post for logical inconsistencies, ask critical questions, and provide suggestions for improvement. Furthermore, proofread the post for grammar and spelling errors. Only if the post is good enough for publishing, then you MUST return 'The post is good.'. In all other cases return your review.",
+    chatHistory,
+  });
+};
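
A short usage sketch of these factories. Only createResearcher is async, because it must load the vector index to build its query tool; TOP_K is read from the environment at tool-creation time. The import path is hypothetical:

import { ChatMessage } from "llamaindex";
import { createResearcher, createReviewer, createWriter } from "./agents"; // hypothetical path

const chatHistory: ChatMessage[] = [
  { role: "user", content: "We are writing about postal standards." },
];
process.env.TOP_K = "5"; // optional: caps similarityTopK for the query tool
const researcher = await createResearcher(chatHistory); // async: loads the index
const writer = createWriter(chatHistory); // no tools, synchronous
const reviewer = createReviewer(chatHistory);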
Lines changed: 133 additions & 0 deletions
@@ -0,0 +1,133 @@
+import {
+  Context,
+  StartEvent,
+  StopEvent,
+  Workflow,
+  WorkflowEvent,
+} from "@llamaindex/core/workflow";
+import { ChatMessage, ChatResponseChunk } from "llamaindex";
+import { createResearcher, createReviewer, createWriter } from "./agents";
+import { AgentInput, AgentRunEvent } from "./type";
+
+const TIMEOUT = 360 * 1000;
+const MAX_ATTEMPTS = 2;
+
+class ResearchEvent extends WorkflowEvent<{ input: string }> {}
+class WriteEvent extends WorkflowEvent<{
+  input: string;
+  isGood: boolean;
+}> {}
+class ReviewEvent extends WorkflowEvent<{ input: string }> {}
+
+export const createWorkflow = (chatHistory: ChatMessage[]) => {
+  const runAgent = async (
+    context: Context,
+    agent: Workflow,
+    input: AgentInput,
+  ) => {
+    const run = agent.run(new StartEvent({ input }));
+    for await (const event of agent.streamEvents()) {
+      if (event.data instanceof AgentRunEvent) {
+        context.writeEventToStream(event.data);
+      }
+    }
+    return await run;
+  };
+
+  const start = async (context: Context, ev: StartEvent) => {
+    context.set("task", ev.data.input);
+    return new ResearchEvent({
+      input: `Research for this task: ${ev.data.input}`,
+    });
+  };
+
+  const research = async (context: Context, ev: ResearchEvent) => {
+    const researcher = await createResearcher(chatHistory);
+    const researchRes = await runAgent(context, researcher, {
+      message: ev.data.input,
+    });
+    const researchResult = researchRes.data.result;
+    return new WriteEvent({
+      input: `Write a blog post given this task: ${context.get("task")} using this research content: ${researchResult}`,
+      isGood: false,
+    });
+  };
+
+  const write = async (context: Context, ev: WriteEvent) => {
+    context.set("attempts", context.get("attempts", 0) + 1);
+    const tooManyAttempts = context.get("attempts") > MAX_ATTEMPTS;
+    if (tooManyAttempts) {
+      context.writeEventToStream(
+        new AgentRunEvent({
+          name: "writer",
+          msg: `Too many attempts (${MAX_ATTEMPTS}) to write the blog post. Proceeding with the current version.`,
+        }),
+      );
+    }
+
+    if (ev.data.isGood || tooManyAttempts) {
+      // The text is ready for publication, we just use the writer to stream the output
+      const writer = createWriter(chatHistory);
+      const content = context.get("result");
+
+      return (await runAgent(context, writer, {
+        message: `Your blog post is ready for publication. Please respond with just the blog post. Blog post: \`\`\`${content}\`\`\``,
+        streaming: true,
+      })) as unknown as StopEvent<AsyncGenerator<ChatResponseChunk>>;
+    }
+
+    const writer = createWriter(chatHistory);
+    const writeRes = await runAgent(context, writer, {
+      message: ev.data.input,
+    });
+    const writeResult = writeRes.data.result;
+    context.set("result", writeResult); // store the last result
+    return new ReviewEvent({ input: writeResult });
+  };
+
+  const review = async (context: Context, ev: ReviewEvent) => {
+    const reviewer = createReviewer(chatHistory);
+    const reviewRes = await reviewer.run(
+      new StartEvent<AgentInput>({ input: { message: ev.data.input } }),
+    );
+    const reviewResult = reviewRes.data.result;
+    const oldContent = context.get("result");
+    const postIsGood = reviewResult.toLowerCase().includes("post is good");
+    context.writeEventToStream(
+      new AgentRunEvent({
+        name: "reviewer",
+        msg: `The post is ${postIsGood ? "" : "not "}good enough for publishing. Sending back to the writer${
+          postIsGood ? " for publication." : "."
+        }`,
+      }),
+    );
+    if (postIsGood) {
+      return new WriteEvent({
+        input: "",
+        isGood: true,
+      });
+    }
+
+    return new WriteEvent({
+      input: `Improve the writing of a given blog post by using a given review.
+Blog post:
+\`\`\`
+${oldContent}
+\`\`\`
+
+Review:
+\`\`\`
+${reviewResult}
+\`\`\``,
+      isGood: false,
+    });
+  };
+
+  const workflow = new Workflow({ timeout: TIMEOUT, validate: true });
+  workflow.addStep(StartEvent, start, { outputs: ResearchEvent });
+  workflow.addStep(ResearchEvent, research, { outputs: WriteEvent });
+  workflow.addStep(WriteEvent, write, { outputs: [ReviewEvent, StopEvent] });
+  workflow.addStep(ReviewEvent, review, { outputs: WriteEvent });
+
+  return workflow;
+};
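
To tie the pieces together, a hedged sketch of driving this workflow outside an HTTP handler, mirroring what the two route handlers above do; the StopEvent payload is the writer's chunk generator, and `.delta` is the incremental text of each ChatResponseChunk. The import path is hypothetical:

import { StopEvent } from "@llamaindex/core/workflow";
import { ChatResponseChunk } from "llamaindex";
import { createWorkflow } from "./factory"; // hypothetical path

const workflow = createWorkflow([]); // empty chat history for a fresh conversation
// run() kicks off start -> research -> write -> review, looping until a StopEvent
const result = workflow.run<AsyncGenerator<ChatResponseChunk>>(
  "Write a blog post about physical standards for letters",
) as unknown as Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>;

// Intermediate AgentRunEvents (researcher/writer/reviewer progress) can be
// observed while the run is in flight, as runAgent does above:
for await (const event of workflow.streamEvents()) {
  console.log(event.data);
}

// The final StopEvent wraps the writer's streamed output:
for await (const chunk of (await result).data.result) {
  process.stdout.write(chunk.delta);
}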
