Skip to content

Commit 76c20f3

Browse files
committed
Merge remote-tracking branch 'origin' into lee/deep-research
2 parents acfc4ec + 860b9d4 commit 76c20f3

File tree

4 files changed

+361
-1
lines changed

4 files changed

+361
-1
lines changed

CHANGELOG.md

+6
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,11 @@
11
# create-llama
22

3+
## 0.3.26
4+
5+
### Patch Changes
6+
7+
- f73d46b: Fix missing copy of the multiagent code
8+
39
## 0.3.25
410

511
### Patch Changes

helpers/python.ts

+7
Original file line numberDiff line numberDiff line change
@@ -480,6 +480,13 @@ export const installPythonTemplate = async ({
480480
await copyRouterCode(root, tools ?? []);
481481
}
482482

483+
// Copy multiagents overrides
484+
if (template === "multiagent") {
485+
await copy("**", path.join(root), {
486+
cwd: path.join(compPath, "multiagent", "python"),
487+
});
488+
}
489+
483490
if (template === "multiagent" || template === "reflex") {
484491
if (useCase) {
485492
const sourcePath =

package.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "create-llama",
3-
"version": "0.3.25",
3+
"version": "0.3.26",
44
"description": "Create LlamaIndex-powered apps with one command",
55
"keywords": [
66
"rag",
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,347 @@
1+
import uuid
2+
from enum import Enum
3+
from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union
4+
5+
from app.workflows.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
6+
from llama_index.core.agent.runner.planner import (
7+
DEFAULT_INITIAL_PLAN_PROMPT,
8+
DEFAULT_PLAN_REFINE_PROMPT,
9+
Plan,
10+
PlannerAgentState,
11+
SubTask,
12+
)
13+
from llama_index.core.bridge.pydantic import ValidationError
14+
from llama_index.core.chat_engine.types import ChatMessage
15+
from llama_index.core.llms.function_calling import FunctionCallingLLM
16+
from llama_index.core.prompts import PromptTemplate
17+
from llama_index.core.settings import Settings
18+
from llama_index.core.tools import BaseTool
19+
from llama_index.core.workflow import (
20+
Context,
21+
Event,
22+
StartEvent,
23+
StopEvent,
24+
Workflow,
25+
step,
26+
)
27+
28+
INITIAL_PLANNER_PROMPT = """\
29+
Think step-by-step. Given a conversation, set of tools and a user request. Your responsibility is to create a plan to complete the task.
30+
The plan must adapt with the user request and the conversation.
31+
32+
The tools available are:
33+
{tools_str}
34+
35+
Conversation: {chat_history}
36+
37+
Overall Task: {task}
38+
"""
39+
40+
41+
class ExecutePlanEvent(Event):
    """Marker event: tells the workflow to pick up the next sub-task of the active plan."""

    pass
43+
44+
45+
class SubTaskEvent(Event):
    """Carries a single sub-task of the plan to be executed next."""

    sub_task: SubTask
47+
48+
49+
class SubTaskResultEvent(Event):
    """Carries the result produced by executing one sub-task."""

    sub_task: SubTask
    # AsyncGenerator when the executor ran with streaming=True, otherwise AgentRunResult.
    result: AgentRunResult | AsyncGenerator
52+
53+
54+
class PlanEventType(Enum):
    """Whether a `PlanEvent` announces a freshly created plan or a refined one."""

    CREATED = "created"
    REFINED = "refined"
57+
58+
59+
class PlanEvent(AgentRunEvent):
    """Streamed to the client whenever a plan is created or refined."""

    event_type: PlanEventType
    plan: Plan

    @property
    def msg(self) -> str:
        """One-line summary naming every sub-task in the plan."""
        names = [sub_task.name for sub_task in self.plan.sub_tasks]
        return f"Plan {self.event_type.value}: Let's do: {', '.join(names)}"
67+
68+
69+
class StructuredPlannerAgent(Workflow):
    """Workflow that plans a task, executes the plan's sub-tasks one at a time, and
    optionally refines the plan after each sub-task.

    Flow: StartEvent -> create_plan -> ExecutePlanEvent -> execute_plan ->
    SubTaskEvent -> execute_sub_task -> SubTaskResultEvent -> gather_results,
    which either stops the workflow or loops back with a new ExecutePlanEvent.
    """

    def __init__(
        self,
        *args: Any,
        name: str,
        llm: FunctionCallingLLM | None = None,
        tools: List[BaseTool] | None = None,
        timeout: float = 360.0,
        refine_plan: bool = False,
        chat_history: Optional[List[ChatMessage]] = None,
        **kwargs: Any,
    ) -> None:
        """Set up the planner (plan creation/refinement) and the executor sub-agent.

        Args:
            name: Name attached to events written to the stream.
            llm: Function-calling LLM shared by planner and executor; planner
                falls back to Settings.llm when None.
            tools: Tools the executor may call; also listed in the planner prompt.
            timeout: Workflow timeout in seconds, forwarded to the base Workflow.
            refine_plan: When True, re-plan after each completed sub-task.
            chat_history: Prior conversation passed to the planner.
        """
        super().__init__(*args, timeout=timeout, **kwargs)
        self.name = name
        self.refine_plan = refine_plan
        self.chat_history = chat_history

        self.tools = tools or []
        self.planner = Planner(
            llm=llm,
            tools=self.tools,
            initial_plan_prompt=INITIAL_PLANNER_PROMPT,
            verbose=self._verbose,
        )
        # The executor is keeping the memory of all tool calls and decides to call the right tool for the task
        self.executor = FunctionCallingAgent(
            name="executor",
            llm=llm,
            tools=self.tools,
            write_events=False,
            # it's important to instruct to just return the tool call, otherwise the executor will interpret and change the result
            system_prompt="You are an expert in completing given tasks by calling the right tool for the task. Just return the result of the tool call. Don't add any information yourself",
        )
        self.add_workflows(executor=self.executor)

    @step()
    async def create_plan(
        self, ctx: Context, ev: StartEvent
    ) -> ExecutePlanEvent | StopEvent:
        """Create the initial plan from the start event and kick off execution."""
        # set streaming
        ctx.data["streaming"] = getattr(ev, "streaming", False)
        ctx.data["task"] = ev.input

        plan_id, plan = await self.planner.create_plan(
            input=ev.input, chat_history=self.chat_history
        )
        # act_plan_id identifies the active plan in the planner's state for all later steps
        ctx.data["act_plan_id"] = plan_id

        # inform about the new plan
        ctx.write_event_to_stream(
            PlanEvent(name=self.name, event_type=PlanEventType.CREATED, plan=plan)
        )
        if self._verbose:
            print("=== Executing plan ===\n")
        return ExecutePlanEvent()

    @step()
    async def execute_plan(self, ctx: Context, ev: ExecutePlanEvent) -> SubTaskEvent:
        """Emit the next ready sub-task of the active plan, or nothing if none remain."""
        upcoming_sub_tasks = self.planner.state.get_next_sub_tasks(
            ctx.data["act_plan_id"]
        )

        if upcoming_sub_tasks:
            # Execute only the first sub-task
            # otherwise the executor will get over-lapping messages
            # alternatively, we could use one executor for all sub tasks
            next_sub_task = upcoming_sub_tasks[0]
            return SubTaskEvent(sub_task=next_sub_task)

        # NOTE(review): returning None emits no event, so this branch of the workflow
        # simply ends; gather_results is the step that issues the StopEvent.
        return None

    @step()
    async def execute_sub_task(
        self, ctx: Context, ev: SubTaskEvent
    ) -> SubTaskResultEvent:
        """Run one sub-task on the executor, forwarding its events to our stream."""
        if self._verbose:
            print(f"=== Executing sub task: {ev.sub_task.name} ===")
        is_last_tasks = self.get_remaining_subtasks(ctx) == 1
        # TODO: streaming only works without plan refining
        streaming = is_last_tasks and ctx.data["streaming"] and not self.refine_plan
        handler = self.executor.run(
            input=ev.sub_task.input,
            streaming=streaming,
        )
        # bubble all events while running the executor to the planner
        async for event in handler.stream_events():
            # Don't write the StopEvent from sub task to the stream
            if type(event) is not StopEvent:
                ctx.write_event_to_stream(event)
        result: AgentRunResult = await handler
        if self._verbose:
            print("=== Done executing sub task ===\n")
        # Mark the sub-task done so get_next_sub_tasks/get_remaining_subtasks advance.
        self.planner.state.add_completed_sub_task(ctx.data["act_plan_id"], ev.sub_task)
        return SubTaskResultEvent(sub_task=ev.sub_task, result=result)

    @step()
    async def gather_results(
        self, ctx: Context, ev: SubTaskResultEvent
    ) -> ExecutePlanEvent | StopEvent:
        """Stop with the last sub-task's result, or (optionally refine and) continue."""
        result = ev

        upcoming_sub_tasks = self.get_upcoming_sub_tasks(ctx)
        # if no more tasks to do, stop workflow and send result of last step
        if upcoming_sub_tasks == 0:
            return StopEvent(result=result.result)

        if self.refine_plan:
            # store the result for refining the plan
            ctx.data["results"] = ctx.data.get("results", {})
            ctx.data["results"][result.sub_task.name] = result.result

            new_plan = await self.planner.refine_plan(
                ctx.data["task"], ctx.data["act_plan_id"], ctx.data["results"]
            )
            # inform about the new plan
            if new_plan is not None:
                ctx.write_event_to_stream(
                    PlanEvent(
                        name=self.name, event_type=PlanEventType.REFINED, plan=new_plan
                    )
                )

        # continue executing plan
        return ExecutePlanEvent()

    def get_upcoming_sub_tasks(self, ctx: Context) -> int:
        """Number of sub-tasks that are ready to run (dependencies satisfied)."""
        upcoming_sub_tasks = self.planner.state.get_next_sub_tasks(
            ctx.data["act_plan_id"]
        )
        return len(upcoming_sub_tasks)

    def get_remaining_subtasks(self, ctx: Context) -> int:
        """Total number of sub-tasks not yet completed in the active plan."""
        remaining_subtasks = self.planner.state.get_remaining_subtasks(
            ctx.data["act_plan_id"]
        )
        return len(remaining_subtasks)
205+
206+
207+
# Concern dealing with creating and refining a plan, extracted from https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/agent/runner/planner.py#L138
class Planner:
    """Creates and refines structured `Plan`s via an LLM's structured prediction.

    Keeps plan state in a `PlannerAgentState`, keyed by a generated plan id.
    """

    def __init__(
        self,
        llm: FunctionCallingLLM | None = None,
        tools: List[BaseTool] | None = None,
        initial_plan_prompt: Union[str, PromptTemplate] = DEFAULT_INITIAL_PLAN_PROMPT,
        plan_refine_prompt: Union[str, PromptTemplate] = DEFAULT_PLAN_REFINE_PROMPT,
        verbose: bool = True,
    ) -> None:
        """Store the LLM, tools, prompts and fresh planner state.

        Args:
            llm: Function-calling LLM; falls back to Settings.llm when None.
            tools: Tools whose names/descriptions are rendered into the prompts.
            initial_plan_prompt: Prompt (or template) for creating a plan.
            plan_refine_prompt: Prompt (or template) for refining a plan.
            verbose: When True, print created/refined plans and fallbacks.
        """
        if llm is None:
            llm = Settings.llm
        self.llm = llm
        # NOTE: assert is stripped under `python -O`; kept for parity with upstream planner.
        assert self.llm.metadata.is_function_calling_model

        self.tools = tools or []
        self.state = PlannerAgentState()
        self.verbose = verbose

        if isinstance(initial_plan_prompt, str):
            initial_plan_prompt = PromptTemplate(initial_plan_prompt)
        self.initial_plan_prompt = initial_plan_prompt

        if isinstance(plan_refine_prompt, str):
            plan_refine_prompt = PromptTemplate(plan_refine_prompt)
        self.plan_refine_prompt = plan_refine_prompt

    def _tools_str(self) -> str:
        """One "name: description" line per tool, each line newline-terminated."""
        return "".join(
            f"{tool.metadata.name}: {tool.metadata.description}\n"
            for tool in self.tools
        )

    def _print_plan(self, title: str, plan: Plan) -> None:
        """Print *title* then each sub-task's input, expected output and dependencies."""
        print(title)
        for sub_task in plan.sub_tasks:
            print(
                f"{sub_task.name}:\n{sub_task.input} -> {sub_task.expected_output}\ndeps: {sub_task.dependencies}\n\n"
            )

    async def create_plan(
        self, input: str, chat_history: Optional[List[ChatMessage]] = None
    ) -> Tuple[str, Plan]:
        """Predict a structured plan for *input*.

        Falls back to a single "default" sub-task wrapping the whole input when
        the LLM cannot produce a valid Plan. Stores the plan in state and
        returns (plan_id, plan).
        """
        try:
            plan = await self.llm.astructured_predict(
                Plan,
                self.initial_plan_prompt,
                tools_str=self._tools_str(),
                task=input,
                chat_history=chat_history,
            )
        except (ValueError, ValidationError):
            if self.verbose:
                print("No complex plan predicted. Defaulting to a single task plan.")
            plan = Plan(
                sub_tasks=[
                    SubTask(
                        name="default", input=input, expected_output="", dependencies=[]
                    )
                ]
            )

        if self.verbose:
            self._print_plan("=== Initial plan ===", plan)

        plan_id = str(uuid.uuid4())
        self.state.plan_dict[plan_id] = plan

        return plan_id, plan

    async def refine_plan(
        self,
        input: str,
        plan_id: str,
        completed_sub_tasks: Dict[str, str],
    ) -> Optional[Plan]:
        """Refine a plan.

        Predicts a new Plan given the completed sub-task outputs; stores and
        returns it, or returns None when no new plan is predicted.
        """
        prompt_args = self.get_refine_plan_prompt_kwargs(
            plan_id, input, completed_sub_tasks
        )

        try:
            new_plan = await self.llm.astructured_predict(
                Plan, self.plan_refine_prompt, **prompt_args
            )
            self._update_plan(plan_id, new_plan)
            return new_plan
        except (ValueError, ValidationError) as e:
            # likely no new plan predicted
            if self.verbose:
                print(f"No new plan predicted: {e}")
            return None

    def _update_plan(self, plan_id: str, new_plan: Plan) -> None:
        """Update the plan."""
        # update state with new plan
        self.state.plan_dict[plan_id] = new_plan

        if self.verbose:
            self._print_plan("=== Refined plan ===", new_plan)

    def get_refine_plan_prompt_kwargs(
        self,
        plan_id: str,
        task: str,
        completed_sub_task: Dict[str, str],
    ) -> dict:
        """Build the keyword arguments for the plan-refine prompt."""
        # gather completed sub-tasks and response pairs
        completed_outputs_str = "".join(
            f"{sub_task_name}:\n\t{task_output!s}\n"
            for sub_task_name, task_output in completed_sub_task.items()
        )

        # get a string for the remaining sub-tasks ("None" when the plan is exhausted)
        remaining_sub_tasks = self.state.get_remaining_subtasks(plan_id)
        remaining_sub_tasks_str = "" if remaining_sub_tasks else "None"
        for sub_task in remaining_sub_tasks:
            remaining_sub_tasks_str += (
                f"SubTask(name='{sub_task.name}', "
                f"input='{sub_task.input}', "
                f"expected_output='{sub_task.expected_output}', "
                f"dependencies='{sub_task.dependencies}')\n"
            )

        # return the kwargs
        return {
            "tools_str": self._tools_str().strip(),
            "task": task.strip(),
            "completed_outputs": completed_outputs_str.strip(),
            "remaining_sub_tasks": remaining_sub_tasks_str.strip(),
        }

0 commit comments

Comments
 (0)