Commit 0031e67

Support llama-index@^0.11.11 for multi-agent template (#305)

leehuwuj authored Sep 23, 2024
1 parent 6e9184d commit 0031e67

Showing 6 changed files with 30 additions and 38 deletions.

5 changes: 5 additions & 0 deletions .changeset/good-news-sneeze.md

@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Bump llama-index to 0.11.11 for the multi-agent template

16 changes: 7 additions & 9 deletions templates/types/multiagent/fastapi/app/agents/multi.py

@@ -1,16 +1,14 @@
-import asyncio
 from typing import Any, List
 
+from llama_index.core.tools.types import ToolMetadata, ToolOutput
+from llama_index.core.tools.utils import create_schema_from_function
+from llama_index.core.workflow import Context, Workflow
+
+from app.agents.planner import StructuredPlannerAgent
 from app.agents.single import (
     AgentRunResult,
     ContextAwareTool,
     FunctionCallingAgent,
 )
-from app.agents.planner import StructuredPlannerAgent
-from llama_index.core.tools.types import ToolMetadata, ToolOutput
-from llama_index.core.tools.utils import create_schema_from_function
-from llama_index.core.workflow import Context, Workflow
 
 
 class AgentCallTool(ContextAwareTool):
@@ -34,11 +32,11 @@ async def schema_call(input: str) -> str:
 
     # overload the acall function with the ctx argument as it's needed for bubbling the events
     async def acall(self, ctx: Context, input: str) -> ToolOutput:
-        task = asyncio.create_task(self.agent.run(input=input))
+        handler = self.agent.run(input=input)
         # bubble all events while running the agent to the calling agent
-        async for ev in self.agent.stream_events():
+        async for ev in handler.stream_events():
             ctx.write_event_to_stream(ev)
-        ret: AgentRunResult = await task
+        ret: AgentRunResult = await handler
         response = ret.response.message.content
         return ToolOutput(
             content=str(response),
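
Note: the recurring change in this commit is the run-handler API of the llama-index 0.11 line. Workflow.run() now returns a handler that is both awaitable and the owner of stream_events(), which is why every asyncio.create_task wrapper (and the import asyncio lines) can be dropped. A minimal before/after sketch, assuming only the public llama-index 0.11.11 workflow API; EchoWorkflow is a toy stand-in, not part of the template:

import asyncio

from llama_index.core.workflow import StartEvent, StopEvent, Workflow, step


class EchoWorkflow(Workflow):
    """Toy workflow standing in for the template's agents."""

    @step
    async def echo(self, ev: StartEvent) -> StopEvent:
        return StopEvent(result=f"echo: {ev.input}")


async def main() -> None:
    wf = EchoWorkflow()
    # Old style (0.11.9): task = asyncio.create_task(wf.run(...)), then
    # read events from wf.stream_events() and await the task.
    # New style (0.11.11): run() returns the handler directly.
    handler = wf.run(input="hello")
    async for ev in handler.stream_events():
        print(type(ev).__name__)  # intermediate events arrive here
    result = await handler  # the workflow's final result
    print(result)


asyncio.run(main())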

16 changes: 6 additions & 10 deletions templates/types/multiagent/fastapi/app/agents/planner.py

@@ -1,8 +1,8 @@
-import asyncio
 import uuid
 from enum import Enum
 from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union
 
+from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
 from llama_index.core.agent.runner.planner import (
     DEFAULT_INITIAL_PLAN_PROMPT,
     DEFAULT_PLAN_REFINE_PROMPT,
@@ -24,8 +24,6 @@
     step,
 )
 
-from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
-
 
 class ExecutePlanEvent(Event):
     pass
@@ -125,16 +123,14 @@ async def execute_sub_task(
         is_last_tasks = ctx.data["num_sub_tasks"] == self.get_remaining_subtasks(ctx)
         # TODO: streaming only works without plan refining
         streaming = is_last_tasks and ctx.data["streaming"] and not self.refine_plan
-        task = asyncio.create_task(
-            self.executor.run(
-                input=ev.sub_task.input,
-                streaming=streaming,
-            )
+        handler = self.executor.run(
+            input=ev.sub_task.input,
+            streaming=streaming,
         )
         # bubble all events while running the executor to the planner
-        async for event in self.executor.stream_events():
+        async for event in handler.stream_events():
             ctx.write_event_to_stream(event)
-        result = await task
+        result: AgentRunResult = await handler
         if self._verbose:
             print("=== Done executing sub task ===\n")
         self.planner.state.add_completed_sub_task(ctx.data["act_plan_id"], ev.sub_task)

14 changes: 5 additions & 9 deletions templates/types/multiagent/fastapi/app/api/routers/chat.py

@@ -1,14 +1,12 @@
-import asyncio
 import logging
 
-from app.examples.factory import create_agent
+from fastapi import APIRouter, HTTPException, Request, status
+from llama_index.core.workflow import Workflow
 
 from app.api.routers.models import (
     ChatData,
 )
 from app.api.routers.vercel_response import VercelStreamResponse
-from fastapi import APIRouter, HTTPException, Request, status
-from llama_index.core.workflow import Workflow
+from app.examples.factory import create_agent
 
-
 chat_router = r = APIRouter()
@@ -30,11 +28,9 @@ async def chat(
         # params = data.data or {}
 
         agent: Workflow = create_agent(chat_history=messages)
-        task = asyncio.create_task(
-            agent.run(input=last_message_content, streaming=True)
-        )
+        handler = agent.run(input=last_message_content, streaming=True)
 
-        return VercelStreamResponse(request, task, agent.stream_events, data)
+        return VercelStreamResponse(request, handler, agent.stream_events, data)
     except Exception as e:
         logger.exception("Error in agent", exc_info=True)
         raise HTTPException(
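
Note: the endpoint now hands VercelStreamResponse the live handler rather than a bare asyncio task. A simplified sketch of how a streaming response can drain such a handler, reusing the toy EchoWorkflow above; the template's real VercelStreamResponse is more involved and also adapts the output to the Vercel streaming protocol:

from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()


@app.get("/chat-sketch")
async def chat_sketch() -> StreamingResponse:
    handler = EchoWorkflow().run(input="hello")

    async def event_stream():
        # Forward intermediate events as they arrive...
        async for ev in handler.stream_events():
            yield f"event: {type(ev).__name__}\n"
        # ...then emit the final result once the workflow completes.
        yield f"result: {await handler}\n"

    return StreamingResponse(event_stream(), media_type="text/plain")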

14 changes: 6 additions & 8 deletions templates/types/multiagent/fastapi/app/examples/workflow.py

@@ -1,7 +1,8 @@
-import asyncio
 from typing import AsyncGenerator, List, Optional
 
-
+from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
+from app.examples.researcher import create_researcher
+from llama_index.core.chat_engine.types import ChatMessage
 from llama_index.core.workflow import (
     Context,
     Event,
@@ -10,9 +11,6 @@
     Workflow,
     step,
 )
-from llama_index.core.chat_engine.types import ChatMessage
-from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
-from app.examples.researcher import create_researcher
 
 
 def create_workflow(chat_history: Optional[List[ChatMessage]] = None):
@@ -132,8 +130,8 @@ async def run_agent(
     input: str,
     streaming: bool = False,
 ) -> AgentRunResult | AsyncGenerator:
-    task = asyncio.create_task(agent.run(input=input, streaming=streaming))
+    handler = agent.run(input=input, streaming=streaming)
     # bubble all events while running the executor to the planner
-    async for event in agent.stream_events():
+    async for event in handler.stream_events():
         ctx.write_event_to_stream(event)
-    return await task
+    return await handler
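
Note: run_agent uses the same bubbling idiom as multi.py and planner.py: every event from the child handler is forwarded onto the parent's stream via ctx.write_event_to_stream before the handler is awaited. A short sketch of that idiom, reusing the toy EchoWorkflow from the first sketch:

from llama_index.core.workflow import Context, StartEvent, StopEvent, Workflow, step


class ParentWorkflow(Workflow):
    @step
    async def delegate(self, ctx: Context, ev: StartEvent) -> StopEvent:
        handler = EchoWorkflow().run(input=ev.input)
        # Bubble the child's events so they appear on this workflow's stream.
        async for child_ev in handler.stream_events():
            ctx.write_event_to_stream(child_ev)
        return StopEvent(result=await handler)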

3 changes: 1 addition & 2 deletions templates/types/multiagent/fastapi/pyproject.toml

@@ -12,8 +12,7 @@ generate = "app.engine.generate:generate_datasource"
 [tool.poetry.dependencies]
 python = "^3.11"
 llama-index-agent-openai = ">=0.3.0,<0.4.0"
-llama-index = "0.11.9"
-llama-index-core = "0.11.9"
+llama-index = "0.11.11"
 fastapi = "^0.112.2"
 python-dotenv = "^1.0.0"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
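
Note: the explicit llama-index-core pin is dropped; the llama-index meta-package already pins a compatible core release, so a second exact pin was redundant and could conflict on upgrades. A quick post-install sanity check, assuming llama-index-core exposes __version__ as it does throughout the 0.11 line:

# Run after `poetry install` to confirm the resolved core version.
import llama_index.core

print(llama_index.core.__version__)  # expect the 0.11.x core pulled in by llama-index 0.11.11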
