
Commit 0031e67

Support llama-index@^0.11.11 for multi-agent template (#305)
1 parent: 6e9184d

6 files changed: 30 additions (+30), 38 deletions (-38)

.changeset/good-news-sneeze.md

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Bump llama-index to 0.11.11 for the multi-agent template

templates/types/multiagent/fastapi/app/agents/multi.py

Lines changed: 7 additions & 9 deletions

@@ -1,16 +1,14 @@
-import asyncio
 from typing import Any, List

-from llama_index.core.tools.types import ToolMetadata, ToolOutput
-from llama_index.core.tools.utils import create_schema_from_function
-from llama_index.core.workflow import Context, Workflow
-
+from app.agents.planner import StructuredPlannerAgent
 from app.agents.single import (
     AgentRunResult,
     ContextAwareTool,
     FunctionCallingAgent,
 )
-from app.agents.planner import StructuredPlannerAgent
+from llama_index.core.tools.types import ToolMetadata, ToolOutput
+from llama_index.core.tools.utils import create_schema_from_function
+from llama_index.core.workflow import Context, Workflow


 class AgentCallTool(ContextAwareTool):
@@ -34,11 +32,11 @@ async def schema_call(input: str) -> str:

     # overload the acall function with the ctx argument as it's needed for bubbling the events
     async def acall(self, ctx: Context, input: str) -> ToolOutput:
-        task = asyncio.create_task(self.agent.run(input=input))
+        handler = self.agent.run(input=input)
         # bubble all events while running the agent to the calling agent
-        async for ev in self.agent.stream_events():
+        async for ev in handler.stream_events():
             ctx.write_event_to_stream(ev)
-        ret: AgentRunResult = await task
+        ret: AgentRunResult = await handler
         response = ret.response.message.content
         return ToolOutput(
             content=str(response),
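The recurring change across these files is visible in this hunk: instead of wrapping agent.run() in asyncio.create_task() and streaming events from the workflow object, the run() call itself now returns a handler that exposes stream_events() and can be awaited for the final result. A minimal runnable sketch of that pattern, assuming llama-index ^0.11.11; EchoWorkflow is a toy example for illustration, not part of the template:

# Sketch of the handler-based workflow API this commit adopts (assumed ^0.11.11).
import asyncio

from llama_index.core.workflow import (
    Context,
    StartEvent,
    StopEvent,
    Workflow,
    step,
)


class EchoWorkflow(Workflow):  # toy workflow, not from the template
    @step()
    async def echo(self, ctx: Context, ev: StartEvent) -> StopEvent:
        # events written to the stream surface on handler.stream_events()
        ctx.write_event_to_stream(ev)
        return StopEvent(result=ev.get("input"))


async def main() -> None:
    handler = EchoWorkflow().run(input="hello")  # returns a handler immediately
    async for ev in handler.stream_events():  # no asyncio.create_task needed
        print(type(ev).__name__)
    result = await handler  # the handler is awaitable; yields the final result
    print(result)


if __name__ == "__main__":
    asyncio.run(main())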

templates/types/multiagent/fastapi/app/agents/planner.py

Lines changed: 6 additions & 10 deletions

@@ -1,8 +1,8 @@
-import asyncio
 import uuid
 from enum import Enum
 from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union

+from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
 from llama_index.core.agent.runner.planner import (
     DEFAULT_INITIAL_PLAN_PROMPT,
     DEFAULT_PLAN_REFINE_PROMPT,
@@ -24,8 +24,6 @@
     step,
 )

-from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
-

 class ExecutePlanEvent(Event):
     pass
@@ -125,16 +123,14 @@ async def execute_sub_task(
         is_last_tasks = ctx.data["num_sub_tasks"] == self.get_remaining_subtasks(ctx)
         # TODO: streaming only works without plan refining
         streaming = is_last_tasks and ctx.data["streaming"] and not self.refine_plan
-        task = asyncio.create_task(
-            self.executor.run(
-                input=ev.sub_task.input,
-                streaming=streaming,
-            )
+        handler = self.executor.run(
+            input=ev.sub_task.input,
+            streaming=streaming,
         )
         # bubble all events while running the executor to the planner
-        async for event in self.executor.stream_events():
+        async for event in handler.stream_events():
             ctx.write_event_to_stream(event)
-        result = await task
+        result: AgentRunResult = await handler
         if self._verbose:
             print("=== Done executing sub task ===\n")
         self.planner.state.add_completed_sub_task(ctx.data["act_plan_id"], ev.sub_task)

templates/types/multiagent/fastapi/app/api/routers/chat.py

Lines changed: 5 additions & 9 deletions

@@ -1,14 +1,12 @@
-import asyncio
 import logging

-from fastapi import APIRouter, HTTPException, Request, status
-from llama_index.core.workflow import Workflow
-
-from app.examples.factory import create_agent
 from app.api.routers.models import (
     ChatData,
 )
 from app.api.routers.vercel_response import VercelStreamResponse
+from app.examples.factory import create_agent
+from fastapi import APIRouter, HTTPException, Request, status
+from llama_index.core.workflow import Workflow

 chat_router = r = APIRouter()

@@ -30,11 +28,9 @@ async def chat(
         # params = data.data or {}

         agent: Workflow = create_agent(chat_history=messages)
-        task = asyncio.create_task(
-            agent.run(input=last_message_content, streaming=True)
-        )
+        handler = agent.run(input=last_message_content, streaming=True)

-        return VercelStreamResponse(request, task, agent.stream_events, data)
+        return VercelStreamResponse(request, handler, agent.stream_events, data)
     except Exception as e:
         logger.exception("Error in agent", exc_info=True)
         raise HTTPException(
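Because the handler is awaitable just like the task it replaces, VercelStreamResponse can accept it without further changes. A sketch of how a streaming endpoint might drain such a handler, assuming the template's create_agent factory; the route and the event serialization here are illustrative only, not the template's actual VercelStreamResponse logic:

# Illustrative-only sketch: consuming the handler in a streaming response.
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

from app.examples.factory import create_agent  # template factory

app = FastAPI()


@app.post("/chat-sketch")  # hypothetical route, for illustration only
async def chat_sketch() -> StreamingResponse:
    agent = create_agent(chat_history=[])
    handler = agent.run(input="Hello", streaming=True)

    async def event_stream():
        # forward intermediate workflow events as they arrive...
        async for ev in handler.stream_events():
            yield f"event: {type(ev).__name__}\n\n"
        # ...then await the handler for the final result, as the diff does
        result = await handler
        yield f"result: {result!r}\n\n"

    return StreamingResponse(event_stream(), media_type="text/event-stream")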

templates/types/multiagent/fastapi/app/examples/workflow.py

Lines changed: 6 additions & 8 deletions

@@ -1,7 +1,8 @@
-import asyncio
 from typing import AsyncGenerator, List, Optional

-
+from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
+from app.examples.researcher import create_researcher
+from llama_index.core.chat_engine.types import ChatMessage
 from llama_index.core.workflow import (
     Context,
     Event,
@@ -10,9 +11,6 @@
     Workflow,
     step,
 )
-from llama_index.core.chat_engine.types import ChatMessage
-from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
-from app.examples.researcher import create_researcher


 def create_workflow(chat_history: Optional[List[ChatMessage]] = None):
@@ -132,8 +130,8 @@ async def run_agent(
     input: str,
     streaming: bool = False,
 ) -> AgentRunResult | AsyncGenerator:
-    task = asyncio.create_task(agent.run(input=input, streaming=streaming))
+    handler = agent.run(input=input, streaming=streaming)
     # bubble all events while running the executor to the planner
-    async for event in agent.stream_events():
+    async for event in handler.stream_events():
        ctx.write_event_to_stream(event)
-    return await task
+    return await handler
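Note the union return type of run_agent: awaiting the handler resolves to a final AgentRunResult in blocking mode, or to an async generator of output chunks when streaming=True. A hedged usage sketch; consume() is hypothetical, and run_agent's leading parameters are assumed to be the context and agent, as its body implies:

# Hypothetical consumer of run_agent above; chunk handling is an assumption
# based on the AgentRunResult | AsyncGenerator return annotation.
import inspect


async def consume(ctx: Context, agent: FunctionCallingAgent) -> None:
    result = await run_agent(ctx, agent, input="Summarize the findings", streaming=True)
    if inspect.isasyncgen(result):
        async for chunk in result:  # streamed output in streaming mode
            print(chunk)
    else:  # AgentRunResult in non-streaming mode
        print(result.response.message.content)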

templates/types/multiagent/fastapi/pyproject.toml

Lines changed: 1 addition & 2 deletions

@@ -12,8 +12,7 @@ generate = "app.engine.generate:generate_datasource"
 [tool.poetry.dependencies]
 python = "^3.11"
 llama-index-agent-openai = ">=0.3.0,<0.4.0"
-llama-index = "0.11.9"
-llama-index-core = "0.11.9"
+llama-index = "0.11.11"
 fastapi = "^0.112.2"
 python-dotenv = "^1.0.0"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
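Along with the bump, the explicit llama-index-core pin is dropped: the llama-index metapackage already depends on a compatible llama-index-core release, so keeping a single pin avoids the two versions drifting apart.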
