From 652397209410c35364904dbe3ba2c57059d3f12a Mon Sep 17 00:00:00 2001 From: Sarmad Qadri Date: Tue, 1 Apr 2025 23:47:29 -0400 Subject: [PATCH 1/9] WIP app server changes --- examples/mcp_basic_slack_agent/main.py | 1 + examples/mcp_hello_world/main.py | 7 +- examples/mcp_researcher/main.py | 15 +- examples/workflow_mcp_server/README.md | 90 + examples/workflow_mcp_server/client.py | 178 ++ .../workflow_mcp_server/mcp_agent.config.yaml | 29 + .../mcp_agent.secrets.yaml.example | 5 + examples/workflow_mcp_server/requirements.txt | 3 + examples/workflow_mcp_server/server.py | 258 +++ src/mcp_agent/agents/agent.py | 4 +- src/mcp_agent/app.py | 3 + src/mcp_agent/app_server.py | 1468 +++++++++++++++++ src/mcp_agent/app_server_types.py | 49 + src/mcp_agent/config.py | 3 +- src/mcp_agent/context.py | 3 + src/mcp_agent/executor/workflow.py | 45 +- src/mcp_agent/mcp/mcp_agent_server.py | 56 - .../workflows/llm/augmented_llm_openai.py | 6 +- 18 files changed, 2150 insertions(+), 73 deletions(-) create mode 100644 examples/workflow_mcp_server/README.md create mode 100644 examples/workflow_mcp_server/client.py create mode 100644 examples/workflow_mcp_server/mcp_agent.config.yaml create mode 100644 examples/workflow_mcp_server/mcp_agent.secrets.yaml.example create mode 100644 examples/workflow_mcp_server/requirements.txt create mode 100644 examples/workflow_mcp_server/server.py create mode 100644 src/mcp_agent/app_server.py create mode 100644 src/mcp_agent/app_server_types.py delete mode 100644 src/mcp_agent/mcp/mcp_agent_server.py diff --git a/examples/mcp_basic_slack_agent/main.py b/examples/mcp_basic_slack_agent/main.py index a606f6161..3826c943a 100644 --- a/examples/mcp_basic_slack_agent/main.py +++ b/examples/mcp_basic_slack_agent/main.py @@ -7,6 +7,7 @@ app = MCPApp(name="mcp_basic_agent") + async def example_usage(): async with app.run() as agent_app: logger = agent_app.logger diff --git a/examples/mcp_hello_world/main.py b/examples/mcp_hello_world/main.py index e87e105c7..f199d714b 100644 --- a/examples/mcp_hello_world/main.py +++ b/examples/mcp_hello_world/main.py @@ -33,9 +33,12 @@ async def example_usage(): try: filesystem_client = await connection_manager.get_server( - server_name="filesystem", client_session_factory=MCPAgentClientSession + server_name="filesystem", + client_session_factory=MCPAgentClientSession, + ) + logger.info( + "filesystem: Connected to server with persistent connection." 
) - logger.info("filesystem: Connected to server with persistent connection.") fetch_client = await connection_manager.get_server( server_name="fetch", client_session_factory=MCPAgentClientSession diff --git a/examples/mcp_researcher/main.py b/examples/mcp_researcher/main.py index aa1c66f6b..e7d470c94 100644 --- a/examples/mcp_researcher/main.py +++ b/examples/mcp_researcher/main.py @@ -13,6 +13,7 @@ app = MCPApp(name="mcp_root_test") + async def example_usage(): async with app.run() as agent_app: folder_path = Path("agent_folder") @@ -22,13 +23,13 @@ async def example_usage(): # Overwrite the config because full path to agent folder needs to be passed context.config.mcp.servers["interpreter"].args = [ - "run", - "-i", - "--rm", - "--pull=always", - "-v", - f"{os.path.abspath('agent_folder')}:/mnt/data/", - "ghcr.io/evalstate/mcp-py-repl:latest", + "run", + "-i", + "--rm", + "--pull=always", + "-v", + f"{os.path.abspath('agent_folder')}:/mnt/data/", + "ghcr.io/evalstate/mcp-py-repl:latest", ] async with MCPConnectionManager(context.server_registry): diff --git a/examples/workflow_mcp_server/README.md b/examples/workflow_mcp_server/README.md new file mode 100644 index 000000000..a70b67cdb --- /dev/null +++ b/examples/workflow_mcp_server/README.md @@ -0,0 +1,90 @@ +# Workflow MCP Server Example + +This example demonstrates how to: + +1. Create custom workflows using the MCP Agent framework +2. Expose those workflows as MCP tools via an MCP server +3. Connect to the workflow MCP server from a client application + +## Concepts Demonstrated + +- Using the `Workflow` base class to create custom workflows +- Registering workflows with an `MCPApp` +- Exposing workflows as MCP tools using `app_server.py` +- Connecting to a workflow server using `gen_client` +- Running workflows and monitoring their progress + +## Workflows in this Example + +1. **DataProcessorWorkflow**: A workflow that processes data in three steps: + - Finding and retrieving content from a source (file or URL) + - Analyzing the content + - Formatting the results + +2. **SummarizationWorkflow**: A workflow that summarizes text content: + - Generates a concise summary + - Extracts key points + - Returns structured data + +## How to Run + +1. Copy the example secrets file: + ``` + cp mcp_agent.secrets.yaml.example mcp_agent.secrets.yaml + ``` + +2. Edit `mcp_agent.secrets.yaml` to add your API keys. + +3. Run the client, which will automatically start the server: + ``` + uv run client.py + ``` + +## Code Structure + +- `server.py`: Defines the workflows and creates the MCP server +- `client.py`: Connects to the server and runs the workflows +- `mcp_agent.config.yaml`: Configuration for MCP servers and other settings +- `mcp_agent.secrets.yaml`: Secret API keys (not included in repository) + +## Understanding the Code + +### Workflow Definition + +Workflows are defined by subclassing the `Workflow` base class and implementing: +- The `run` method containing the main workflow logic +- Optional `initialize` and `cleanup` methods for setup and teardown + +```python +class DataProcessorWorkflow(Workflow[str]): + async def run(self, source: str, analysis_prompt: Optional[str] = None, output_format: Optional[str] = None) -> WorkflowResult[str]: + # Workflow implementation... 
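+        # A condensed, illustrative sketch of the real implementation in
+        # server.py (assumes initialize() attached the finder/analyzer/
+        # formatter LLMs shown there):
+        raw_data = await self.finder_llm.generate_str(
+            f"Retrieve the content from {source} and return it verbatim."
+        )
+        analysis = await self.analyzer_llm.generate_str(
+            f"{analysis_prompt or 'Extract the key points.'}\n\n{raw_data[:5000]}"
+        )
+        formatted = await self.formatter_llm.generate_str(
+            f"Format the following analysis into {output_format or 'markdown'} format:\n\n{analysis}"
+        )
+        return WorkflowResult[str](value=formatted)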
+``` + +### Registering a Workflow + +Workflows are registered with the MCPApp using the `@app.workflow` decorator: + +```python +app = MCPApp(name="workflow_mcp_server") + +@app.workflow +class DataProcessorWorkflowRegistered(DataProcessorWorkflow): + pass +``` + +### Exposing Workflows as Tools + +The MCP server automatically exposes workflows as tools, creating endpoints for: +- Running a workflow: `workflows/{workflow_id}/run` +- Checking status: `workflows/{workflow_id}/get_status` +- Controlling workflow execution: `workflows/{workflow_id}/pause`, `workflows/{workflow_id}/resume`, `workflows/{workflow_id}/cancel` + +### Connecting to the Workflow Server + +The client connects to the workflow server using the `gen_client` function: + +```python +async with gen_client("workflow_server", context.server_registry) as server: + # Connect and use the server +``` \ No newline at end of file diff --git a/examples/workflow_mcp_server/client.py b/examples/workflow_mcp_server/client.py new file mode 100644 index 000000000..e7bf598e0 --- /dev/null +++ b/examples/workflow_mcp_server/client.py @@ -0,0 +1,178 @@ +import asyncio +import time + +from mcp_agent.app import MCPApp +from mcp_agent.mcp.gen_client import gen_client + + +async def main(): + # Create MCPApp to get the server registry + app = MCPApp(name="workflow_mcp_client") + async with app.run() as client_app: + logger = client_app.logger + context = client_app.context + + # Connect to the workflow server + logger.info("Connecting to workflow server...") + + # Override the server configuration to point to our local script + context.server_registry.add_server( + "workflow_server", + command="uv", + args=["run", "server.py"], + description="Local workflow server exposing data processing and summarization workflows", + ) + + # Connect to the workflow server + async with gen_client("workflow_server", context.server_registry) as server: + # List available tools + tools = await server.list_tools() + logger.info( + "Available tools:", data={"tools": [tool.name for tool in tools]} + ) + + # List available workflows + logger.info("Fetching available workflows...") + workflows_response = await server.call_tool("workflows/list", {}) + + workflows = {} + if workflows_response.content and len(workflows_response.content) > 0: + workflows = workflows_response.content[0].text + + logger.info( + "Available workflows:", data={"workflows": list(workflows.keys())} + ) + + # Run summarization workflow + logger.info("Running the SummarizationWorkflowRegistered workflow...") + sample_text = """ + The Model Context Protocol (MCP) is a standardized API for AI assistants to communicate with tools + and services in their context. This protocol standardizes the way assistants access data through + tool definitions, tools calls, and file/URL content. It is designed to make it easy for developers + to give AI assistants access to data and tools, and for AI assistants to understand how to interact + with those tools. The protocol defines a consistent pattern for tool discovery, invocation, + and response handling that works across different AI assistant implementations. 
+ """ + + # Start the summarization workflow + workflow_run_response = await server.call_tool( + "workflows/SummarizationWorkflowRegistered/run", + { + "args": { + "content": sample_text, + "max_length": 200, + "style": "technical", + "key_points": 3, + } + }, + ) + + if workflow_run_response.content and len(workflow_run_response.content) > 0: + workflow_result = workflow_run_response.content[0].text + workflow_id = workflow_result.get("workflow_id") + logger.info( + "Summarization workflow started", data={"workflow_id": workflow_id} + ) + + # Wait for workflow to complete + logger.info("Waiting for workflow to complete...") + await asyncio.sleep(5) + + # Check workflow status + status_response = await server.call_tool( + "workflows/SummarizationWorkflowRegistered/get_status", + {"workflow_instance_id": workflow_id}, + ) + + if status_response.content and len(status_response.content) > 0: + status = status_response.content[0].text + + if status.get("completed", False) and "result" in status: + logger.info("Workflow completed!") + result = status.get("result", {}) + + if "value" in result: + summary = result["value"].get( + "summary", "No summary available" + ) + key_points = result["value"].get( + "key_points", "No key points available" + ) + + logger.info("Summary:", data={"summary": summary}) + logger.info("Key Points:", data={"key_points": key_points}) + else: + logger.info("Workflow status:", data={"status": status}) + + # Run data processor workflow + logger.info("Running the DataProcessorWorkflowRegistered workflow...") + + # Use a URL that the server's fetch tool can access + data_workflow_response = await server.call_tool( + "workflows/DataProcessorWorkflowRegistered/run", + { + "args": { + "source": "https://modelcontextprotocol.io/introduction", + "analysis_prompt": "Analyze what MCP is and its key benefits", + "output_format": "markdown", + } + }, + ) + + if ( + data_workflow_response.content + and len(data_workflow_response.content) > 0 + ): + workflow_result = data_workflow_response.content[0].text + workflow_id = workflow_result.get("workflow_id") + logger.info( + "Data processor workflow started", data={"workflow_id": workflow_id} + ) + + # Wait for workflow to complete (this might take longer) + logger.info("Waiting for data processor workflow to complete...") + max_wait = 30 # Maximum wait time in seconds + wait_interval = 5 # Check every 5 seconds + + for _ in range(max_wait // wait_interval): + await asyncio.sleep(wait_interval) + + # Check workflow status + status_response = await server.call_tool( + "workflows/DataProcessorWorkflowRegistered/get_status", + {"workflow_instance_id": workflow_id}, + ) + + if status_response.content and len(status_response.content) > 0: + status = status_response.content[0].text + + if status.get("completed", False): + result = status.get("result", {}) + logger.info("Data processor workflow completed!") + + if "value" in result: + logger.info( + "Processed Data:", + data={"data": result["value"][:500] + "..."}, + ) + break + + # If failed, break early + if status.get("error"): + logger.error( + "Workflow failed:", data={"error": status.get("error")} + ) + break + else: + logger.warning( + "Workflow took too long to complete, giving up after waiting" + ) + + +if __name__ == "__main__": + start = time.time() + asyncio.run(main()) + end = time.time() + t = end - start + + print(f"Total run time: {t:.2f}s") diff --git a/examples/workflow_mcp_server/mcp_agent.config.yaml b/examples/workflow_mcp_server/mcp_agent.config.yaml new file mode 100644 
index 000000000..1564d2ed4 --- /dev/null +++ b/examples/workflow_mcp_server/mcp_agent.config.yaml @@ -0,0 +1,29 @@ +execution_engine: asyncio +logger: + transports: [console] + level: debug + path: "logs/mcp-agent.jsonl" + +mcp: + servers: + fetch: + command: "uvx" + args: ["mcp-server-fetch"] + description: "Fetch content at URLs from the world wide web" + filesystem: + command: "npx" + args: + [ + "-y", + "@modelcontextprotocol/server-filesystem", + # Current directory will be added by the code + ] + description: "Read and write files on the filesystem" + workflow_server: + command: "uv" + args: ["run", "server.py"] + description: "Local workflow server exposing data processing and summarization workflows" + +openai: + default_model: gpt-4o + # Secrets are loaded from mcp_agent.secrets.yaml \ No newline at end of file diff --git a/examples/workflow_mcp_server/mcp_agent.secrets.yaml.example b/examples/workflow_mcp_server/mcp_agent.secrets.yaml.example new file mode 100644 index 000000000..6005309ce --- /dev/null +++ b/examples/workflow_mcp_server/mcp_agent.secrets.yaml.example @@ -0,0 +1,5 @@ +openai: + api_key: sk-your-openai-key + +anthropic: + api_key: sk-ant-your-anthropic-key \ No newline at end of file diff --git a/examples/workflow_mcp_server/requirements.txt b/examples/workflow_mcp_server/requirements.txt new file mode 100644 index 000000000..1e6e456b7 --- /dev/null +++ b/examples/workflow_mcp_server/requirements.txt @@ -0,0 +1,3 @@ +mcp-agent +rich +openai>=1.0.0 \ No newline at end of file diff --git a/examples/workflow_mcp_server/server.py b/examples/workflow_mcp_server/server.py new file mode 100644 index 000000000..5527e37c4 --- /dev/null +++ b/examples/workflow_mcp_server/server.py @@ -0,0 +1,258 @@ +import asyncio +import os +from typing import Dict, Any, Optional +from mcp.server.helpers.stdio import stdio_server + +from mcp_agent.app import MCPApp +from mcp_agent.app_server import create_mcp_server_for_app +from mcp_agent.agents.agent import Agent +from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM +from mcp_agent.executor.workflow import Workflow, WorkflowResult + + +class DataProcessorWorkflow(Workflow[str]): + """ + A workflow that processes data using multiple agents, each specialized for a different task. + This workflow demonstrates how to use multiple agents to process data in a sequence. 
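+
+    The pipeline runs three agents in sequence: a finder retrieves the source
+    content, an analyzer extracts key insights, and a formatter renders the
+    result in the requested output format.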
+ """ + + async def initialize(self): + await super().initialize() + self.state.status = "ready" + + # Create agents for different steps of the workflow + self.finder_agent = Agent( + name="finder", + instruction="You are specialized in finding and retrieving information from files or URLs.", + server_names=["fetch", "filesystem"], + ) + + self.analyzer_agent = Agent( + name="analyzer", + instruction="You are specialized in analyzing text data and extracting key insights.", + server_names=["fetch"], + ) + + self.formatter_agent = Agent( + name="formatter", + instruction="You are specialized in formatting data into structured outputs.", + server_names=[], + ) + + # Initialize the agents + await self.finder_agent.initialize() + await self.analyzer_agent.initialize() + await self.formatter_agent.initialize() + + # Attach LLMs to the agents + self.finder_llm = await self.finder_agent.attach_llm(OpenAIAugmentedLLM) + self.analyzer_llm = await self.analyzer_agent.attach_llm(OpenAIAugmentedLLM) + self.formatter_llm = await self.formatter_agent.attach_llm(OpenAIAugmentedLLM) + + async def cleanup(self): + # Clean up resources + await self.finder_agent.cleanup() + await self.analyzer_agent.cleanup() + await self.formatter_agent.cleanup() + await super().cleanup() + + async def run( + self, + source: str, + analysis_prompt: Optional[str] = None, + output_format: Optional[str] = None, + ) -> WorkflowResult[str]: + """ + Run the data processing workflow. + + Args: + source: The source to process. Can be a file path or URL. + analysis_prompt: Optional specific instructions for the analysis step. + output_format: Optional format for the output (e.g., "json", "markdown", "summary"). + + Returns: + WorkflowResult containing the processed data. + """ + self.state.status = "running" + self._logger.info(f"Starting data processing workflow for source: {source}") + + # Step 1: Find and retrieve the data + self._logger.info("Step 1: Finding and retrieving data") + self.state.metadata["current_step"] = "retrieval" + + retrieval_prompt = f"Retrieve the content from {source} and return it verbatim." + raw_data = await self.finder_llm.generate_str(retrieval_prompt) + + self.state.metadata["retrieval_completed"] = True + self.state.metadata["content_length"] = len(raw_data) + + # Step 2: Analyze the data + self._logger.info("Step 2: Analyzing data") + self.state.metadata["current_step"] = "analysis" + + analysis_instruction = ( + analysis_prompt + or "Analyze this content and extract the key points, main themes, and most important information." 
+ ) + analysis = await self.analyzer_llm.generate_str( + f"{analysis_instruction}\n\nHere is the content to analyze:\n\n{raw_data[:5000]}" # Limit to 5000 chars for safety + ) + + self.state.metadata["analysis_completed"] = True + + # Step 3: Format the result + self._logger.info("Step 3: Formatting output") + self.state.metadata["current_step"] = "formatting" + + format_instruction = output_format or "markdown" + format_prompt = f"Format the following analysis into {format_instruction} format, highlighting the most important points:\n\n{analysis}" + + formatted_result = await self.formatter_llm.generate_str(format_prompt) + + self.state.metadata["formatting_completed"] = True + self.state.status = "completed" + + # Create and return the final result + result = WorkflowResult[str]( + value=formatted_result, + metadata={ + "source": source, + "content_length": len(raw_data), + "analysis_prompt": analysis_prompt, + "output_format": format_instruction, + "workflow_completed": True, + }, + start_time=self.state.metadata.get("start_time"), + end_time=self.state.updated_at, + ) + + return result + + +class SummarizationWorkflow(Workflow[Dict[str, Any]]): + """ + A workflow that summarizes text content with customizable parameters. + This workflow demonstrates how to create a simple summarization pipeline. + """ + + async def initialize(self): + await super().initialize() + + # Create an agent for summarization + self.summarizer_agent = Agent( + name="summarizer", + instruction="You are specialized in summarizing content clearly and concisely.", + server_names=["fetch", "filesystem"], + ) + + # Initialize the agent + await self.summarizer_agent.initialize() + + # Attach LLM to the agent + self.summarizer_llm = await self.summarizer_agent.attach_llm(OpenAIAugmentedLLM) + + async def cleanup(self): + await self.summarizer_agent.cleanup() + await super().cleanup() + + async def run( + self, + content: str, + max_length: int = 500, + style: str = "concise", + key_points: int = 3, + ) -> WorkflowResult[Dict[str, Any]]: + """ + Summarize the provided content. + + Args: + content: The text content to summarize. + max_length: Maximum length of the summary in characters. + style: Style of summarization (concise, detailed, technical, simple). + key_points: Number of key points to include. + + Returns: + WorkflowResult containing the summary and metadata. + """ + self.state.status = "running" + self._logger.info( + f"Starting summarization workflow (style: {style}, key_points: {key_points})" + ) + + # Record the start time + start_time = self.state.updated_at + + # Build the summarization prompt + prompt = f""" + Summarize the following content in a {style} style. + Include {key_points} key points. + Keep the summary under {max_length} characters. + + Content to summarize: + --- + {content[:10000]} # Limit content to 10,000 chars for safety + --- + """ + + summary = await self.summarizer_llm.generate_str(prompt) + + # Extract key points using a follow-up prompt + key_points_prompt = f"Based on the content I just summarized, list exactly {key_points} key points in bullet point format." 
+ key_points_list = await self.summarizer_llm.generate_str(key_points_prompt) + + self.state.status = "completed" + + # Create the structured result + result = WorkflowResult[Dict[str, Any]]( + value={ + "summary": summary, + "key_points": key_points_list, + "style": style, + "length": len(summary), + "requested_max_length": max_length, + }, + metadata={ + "workflow_name": self.name, + "content_length": len(content), + "completion_status": "success", + }, + start_time=start_time, + end_time=self.state.updated_at, + ) + + return result + + +# Initialize the app +app = MCPApp(name="workflow_mcp_server") + + +# Register workflows with the app +@app.workflow +class DataProcessorWorkflowRegistered(DataProcessorWorkflow): + pass + + +@app.workflow +class SummarizationWorkflowRegistered(SummarizationWorkflow): + pass + + +async def main(): + # Initialize the app + await app.initialize() + + # Add the current directory to the filesystem server's args if needed + context = app.context + context.config.mcp.servers["filesystem"].args.extend([os.getcwd()]) + + # Create the MCP server + mcp_server = create_mcp_server_for_app(app) + + # Run the server + async with stdio_server() as (read_stream, write_stream): + await mcp_server.run(read_stream, write_stream) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_agent/agents/agent.py b/src/mcp_agent/agents/agent.py index 6c67eed0e..b7e94c78c 100644 --- a/src/mcp_agent/agents/agent.py +++ b/src/mcp_agent/agents/agent.py @@ -61,6 +61,7 @@ def __init__( self.functions = functions or [] self.executor = self.context.executor self.logger = get_logger(f"{__name__}.{name}") + self.llm: AugmentedLLM | None = None # Map function names to tools self._function_tool_map: Dict[str, FastTool] = {} @@ -95,7 +96,8 @@ async def attach_llm(self, llm_factory: Callable[..., LLM]) -> LLM: Returns: An instance of AugmentedLLM or one of its subclasses. """ - return llm_factory(agent=self) + self.llm = llm_factory(agent=self) + return self.llm async def shutdown(self): """ diff --git a/src/mcp_agent/app.py b/src/mcp_agent/app.py index 61f7ba6d1..a37faca51 100644 --- a/src/mcp_agent/app.py +++ b/src/mcp_agent/app.py @@ -148,6 +148,9 @@ async def initialize(self): self._context.upstream_session = self._upstream_session self._context.model_selector = self._model_selector + # Store a reference to this app instance in the context for easier access + self._context.app = self + self._initialized = True self.logger.info( "MCPAgent initialized", diff --git a/src/mcp_agent/app_server.py b/src/mcp_agent/app_server.py new file mode 100644 index 000000000..f013d609b --- /dev/null +++ b/src/mcp_agent/app_server.py @@ -0,0 +1,1468 @@ +""" +MCPAgentServer - Exposes mcp-agent workflows and agents as MCP tools. 
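+
+The server exposes generic endpoints (servers/list, agents/list, agents/create,
+workflows/list, workflows/run, and workflow signal tools) alongside per-agent
+and per-workflow tools that are registered dynamically as agents and workflows
+are created.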
+""" + +import asyncio +import inspect +import uuid +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager +from typing import Any, Dict, List, Literal, Optional, Type + +from mcp.server.fastmcp import Context as MCPContext, FastMCP +from mcp.server.fastmcp.exceptions import ToolError +from mcp.shared.session import BaseSession +from mcp.types import ToolListChangedNotification + +from mcp_agent.app import MCPApp +from mcp_agent.app_server_types import ( + MCPMessageParam, + MCPMessageResult, + create_model_from_schema, +) +from mcp_agent.agents.agent import Agent +from mcp_agent.config import MCPServerSettings +from mcp_agent.context_dependent import ContextDependent +from mcp_agent.executor.workflow import Workflow +from mcp_agent.executor.workflow_signal import Signal +from mcp_agent.logging.logger import get_logger +from mcp_agent.mcp_server_registry import ServerRegistry +from mcp_agent.workflows.llm.augmented_llm import MessageParamT, RequestParams + +logger = get_logger(__name__) + + +class ServerContext(ContextDependent): + """Context object for the MCP App server.""" + + def __init__(self, mcp: FastMCP, context=None, **kwargs): + super().__init__(context=context, **kwargs) + self.mcp = mcp + self.active_workflows: Dict[str, Any] = {} + self.active_agents: Dict[str, Agent] = {} + + # Register existing workflows from the app + for workflow_id, workflow_cls in self.context.app.workflows.items(): + self.register_workflow(workflow_id, workflow_cls) + + def register_workflow(self, workflow_id: str, workflow_cls: Type[Workflow]): + """Register a workflow class.""" + if workflow_id not in self.context.app.workflows: + self.context.app.workflows[workflow_id] = workflow_cls + # Create tools for this workflow + create_workflow_specific_tools(self.mcp, workflow_id, workflow_cls) + + def register_agent(self, agent: Agent): + """Register an agent instance.""" + if agent.name not in self.active_agents: + self.active_agents[agent.name] = agent + # Create tools for this agent + create_agent_specific_tools(self.mcp, agent.name, agent) + return agent + return self.active_agents[ + agent.name + ] # Return existing agent if already registered + + +def create_mcp_server_for_app(app: MCPApp) -> FastMCP: + """ + Create an MCP server for a given MCPApp instance. + + Args: + app: The MCPApp instance to create a server for + + Returns: + A configured FastMCP server instance + """ + + # Create a lifespan function specific to this app + @asynccontextmanager + async def app_specific_lifespan(mcp: FastMCP) -> AsyncIterator[ServerContext]: + """Initialize and manage MCPApp lifecycle.""" + # Initialize the app if it's not already initialized + await app.initialize() + + # Create the server context which is available during the lifespan of the server + server_context = ServerContext(mcp=mcp, context=app.context) + + # Register initial agent and workflow tools + create_agent_tools(mcp, server_context) + create_workflow_tools(mcp, server_context) + + try: + yield server_context + finally: + # Don't clean up the MCPApp here - let the caller handle that + pass + + # Create FastMCP server with the app's name + mcp = FastMCP( + name=app.name or "mcp_agent_server", + # TODO: saqadri (MAC) - create a much more detailed description based on all the available agents and workflows, + # or use the MCPApp's description if available. 
+ instructions=f"MCP server exposing {app.name} workflows and agents", + lifespan=app_specific_lifespan, + ) + + # region Server Tools + + @mcp.tool(name="servers/list") + def list_servers(ctx: MCPContext) -> List[MCPServerSettings]: + """ + List all available MCP servers packaged with this MCP App server, along with their detailed information. + + Returns information about each server including its name, description, + and configuration. This helps in understanding what each server is capable of, + and consequently what this MCP App server can accomplish. + """ + server_context: ServerContext = ctx.request_context.lifespan_context + server_registry = server_context.context.server_registry + + if not server_registry: + raise ToolError("Server registry not found for MCP App Server.") + + result: List[MCPServerSettings] = [] + for _, server_settings in server_registry.registry.items(): + # Remove sensitive information from the server settings + safe_server_settings = server_settings.model_dump(exclude={"auth", "env"}) + result.append(MCPServerSettings(**safe_server_settings)) + + return result + + # region Agent Tools + + @mcp.tool(name="agents/list") + def list_agents(ctx: MCPContext) -> Dict[str, Dict[str, Any]]: + """ + List all available agents with their detailed information. + + Returns information about each agent including their name, instruction, + and the MCP servers they have access to. This helps with understanding + what each agent is designed to do before calling it. + """ + server_context: ServerContext = ctx.request_context.lifespan_context + server_registry = server_context.context.server_registry + result = {} + for name, agent in server_context.active_agents.items(): + # Format instruction - handle callable instructions + instruction = agent.instruction + if callable(instruction): + instruction = instruction({}) + + servers = _get_server_descriptions(server_registry, agent.server_names) + + # Build detailed agent info + result[name] = { + "name": name, + "instruction": instruction, + "servers": servers, + "capabilities": ["generate", "generate_str", "generate_structured"], + "tool_endpoints": [ + f"agents/{name}/generate", + f"agents/{name}/generate_str", + f"agents/{name}/generate_structured", + ], + } + + return result + + @mcp.tool(name="agents/create") + async def create_agent( + ctx: MCPContext, + name: str, + instruction: str, + server_names: List[str], + llm: Literal["openai", "anthropic"] = "openai", + ) -> Dict[str, Any]: + """ + Create a new agent with given name, instruction and list of MCP servers it is allowed to access. + + Args: + name: The name of the agent to create. It must be a unique name not already in agents/list. + instruction: Instructions for the agent (i.e. system prompt). + server_names: List of MCP server names the agent should be able to access. + These MUST be one of the names retrieved using servers/list tool endpoint. + + Returns: + Detailed information about the created agent. + """ + server_context: ServerContext = ctx.request_context.lifespan_context + + agent = Agent( + name=name, + instruction=instruction, + server_names=server_names, + context=server_context.context, + ) + + # TODO: saqadri (MAC) - Add better support for multiple LLMs. 
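+        # The provider SDK imports below are deferred so that only the
+        # dependency for the requested LLM provider needs to be importable.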
+ if llm == "openai": + from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM # pylint: disable=C0415 + + await agent.attach_llm(OpenAIAugmentedLLM) + elif llm == "anthropic": + from mcp_agent.workflows.llm.augmented_llm_anthropic import ( # pylint: disable=C0415 + AnthropicAugmentedLLM, + ) + + await agent.attach_llm(AnthropicAugmentedLLM) + else: + raise ToolError( + f"Unsupported LLM type: {llm}. Only 'openai' and 'anthropic' are presently supported." + ) + + await agent.initialize() + server_context.register_agent(agent) + + # Notify that tools have changed + session: BaseSession = ctx.session + session.send_notification( + ToolListChangedNotification(method="notifications/tools/list_changed") + ) + + server_registry = server_context.context.server_registry + servers = _get_server_descriptions(server_registry, agent.server_names) + + # Return detailed agent info + return { + "name": name, + "instruction": instruction, + "servers": servers, + "capabilities": ["generate", "generate_str", "generate_structured"], + "tool_endpoints": [ + f"agents/{name}/generate", + f"agents/{name}/generate_structured", + ], + } + + @mcp.tool(name="agents/generate") + async def agent_generate( + ctx: MCPContext, + agent_name: str, + message: str | MCPMessageParam | List[MCPMessageParam], + request_params: RequestParams | None = None, + ) -> List[MCPMessageResult]: + """ + Run an agent using the given message. + This is similar to generating an LLM completion. + + Args: + agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. + message: The prompt to send to the agent. + request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + + Returns: + The generated response from the agent. + """ + return await _agent_generate(ctx, agent_name, message, request_params) + + @mcp.tool(name="agents/generate_str") + async def agent_generate_str( + ctx: MCPContext, + agent_name: str, + message: str | MCPMessageParam | List[MCPMessageParam], + request_params: RequestParams | None = None, + ) -> str: + """ + Run an agent using the given message and return the response as a string. + Use agents/generate for results in the original format, and + use agents/generate_structured for results conforming to a specific schema. + + Args: + agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. + message: The prompt to send to the agent. + request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + + Returns: + The generated response from the agent. + """ + return await _agent_generate_str(ctx, agent_name, message, request_params) + + @mcp.tool(name="agents/generate_structured") + async def agent_generate_structured( + ctx: MCPContext, + agent_name: str, + message: str | MCPMessageParam | List[MCPMessageParam], + response_schema: Dict[str, Any], + request_params: RequestParams | None = None, + ) -> Dict[str, Any]: + """ + Generate a structured response from an agent that matches the given schema. + + Args: + agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. + message: The prompt to send to the agent. + response_schema: The JSON schema that defines the shape to generate the response in. + This schema can be generated using type.schema_json() for a Pydantic model. 
+ request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + + Returns: + A dictionary representation of the structured response. + + Example: + response_schema: + { + "title": "UserProfile", + "type": "object", + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "age": { + "title": "Age", + "type": "integer", + "minimum": 0 + }, + "email": { + "title": "Email", + "type": "string", + "format": "email", + "pattern": "^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$" + } + }, + "required": [ + "name", + "age", + "email" + ] + } + """ + return await _agent_generate_structured( + ctx, agent_name, message, response_schema, request_params + ) + + # endregion + + # region Workflow Tools + + @mcp.tool(name="workflows/list") + def list_workflows(ctx: MCPContext) -> Dict[str, Dict[str, Any]]: + """ + List all available workflows exposed with their detailed information. + + Returns information about each workflow including name, description, and parameters. + This helps in making an informed decision about which workflow to run. + """ + server_config: ServerContext = ctx.request_context.lifespan_context + + result = {} + for workflow_id, workflow_cls in server_config.context.app.workflows.items(): + # Get workflow documentation + doc = workflow_cls.__doc__ or "No description available" + + # Get workflow run method parameters using inspection + parameters = {} + if hasattr(workflow_cls, "run"): + sig = inspect.signature(workflow_cls.run) + for param_name, param in sig.parameters.items(): + if param_name != "self": + param_info = { + "type": str(param.annotation) + .replace("", ""), + "required": param.default == inspect.Parameter.empty, + } + if param.default != inspect.Parameter.empty: + param_info["default"] = param.default + parameters[param_name] = param_info + + result[workflow_id] = { + "name": workflow_id, + "description": doc.strip(), + "parameters": parameters, + "capabilities": ["run", "pause", "resume", "cancel", "get_status"], + "tool_endpoints": [ + f"workflows/{workflow_id}/run", + f"workflows/{workflow_id}/get_status", + f"workflows/{workflow_id}/pause", + f"workflows/{workflow_id}/resume", + f"workflows/{workflow_id}/cancel", + ], + } + + return result + + @mcp.tool(name="workflows/list_running") + def list_running_workflows(ctx: MCPContext) -> Dict[str, Dict[str, Any]]: + """ + List all running workflow instances with their detailed status information. + + For each running workflow, returns its ID, name, current state, and available operations. + This helps in identifying and managing active workflow instances. + + Returns: + A dictionary mapping workflow IDs to their detailed status information. 
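+
+        Note: internal registry entries whose IDs end in "_task" are
+        asyncio.Task handles used to track execution; they are filtered out
+        of this listing.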
+ """ + server_config: ServerContext = ctx.request_context.lifespan_context + + result = {} + for workflow_id, workflow in server_config.active_workflows.items(): + # Skip task entries + if workflow_id.endswith("_task"): + continue + + task = server_config.active_workflows.get(workflow_id + "_task") + + # Get workflow information + workflow_info = { + "id": workflow_id, + "name": workflow.name, + "running": task is not None and not task.done() if task else False, + "state": workflow.state.model_dump() + if hasattr(workflow, "state") + else {}, + "tool_endpoints": [ + f"workflows/{workflow.name}/get_status", + f"workflows/{workflow.name}/pause", + f"workflows/{workflow.name}/resume", + f"workflows/{workflow.name}/cancel", + ], + } + + if task and task.done(): + try: + task_result = task.result() + workflow_info["result"] = ( + task_result.model_dump() + if hasattr(task_result, "model_dump") + else str(task_result) + ) + workflow_info["completed"] = True + workflow_info["error"] = None + except Exception as e: + workflow_info["result"] = None + workflow_info["completed"] = False + workflow_info["error"] = str(e) + + result[workflow_id] = workflow_info + + return result + + @mcp.tool(name="workflows/run") + async def run_workflow( + ctx: MCPContext, + workflow_name: str, + args: Optional[Dict[str, Any]] = None, + ) -> Dict[str, Any]: + """ + Run a workflow with the given name. + + Args: + workflow_name: The name of the workflow to run. + args: Optional arguments to pass to the workflow. + + Returns: + Information about the running workflow including its ID and metadata. + """ + server_config: ServerContext = ctx.request_context.lifespan_context + app = server_config.context.app + + if workflow_name not in app.workflows: + raise ValueError(f"Workflow '{workflow_name}' not found.") + + # Create a workflow instance + workflow_cls = app.workflows[workflow_name] + workflow = workflow_cls(executor=app.executor, name=workflow_name) + + # Generate a unique ID for this workflow instance + workflow_id = str(uuid.uuid4()) + + # Store the workflow instance + server_config.active_workflows[workflow_id] = workflow + + # Run the workflow in a separate task + args = args or {} + run_task = asyncio.create_task(workflow.run(**args)) + + # Store the task to check status later + server_config.active_workflows[workflow_id + "_task"] = run_task + + # Return information about the workflow + return { + "workflow_id": workflow_id, + "workflow_name": workflow_name, + "status": "running", + "args": args, + "tool_endpoints": [ + f"workflows/{workflow_name}/get_status", + f"workflows/{workflow_name}/pause", + f"workflows/{workflow_name}/resume", + f"workflows/{workflow_name}/cancel", + ], + "message": f"Workflow {workflow_name} started with ID {workflow_id}. Use the returned workflow_id with other workflow tools.", + } + + @mcp.tool(name="workflows/get_status") + def get_workflow_status(ctx: MCPContext, workflow_id: str) -> Dict[str, Any]: + """ + Get the status of a running workflow. + + Provides detailed information about a workflow instance including its current state, + whether it's running or completed, and any results or errors encountered. + + Args: + workflow_id: The ID of the workflow to check. + + Returns: + A dictionary with comprehensive information about the workflow status. 
+ """ + server_config: ServerContext = ctx.request_context.lifespan_context + + if workflow_id not in server_config.active_workflows: + raise ValueError(f"Workflow with ID '{workflow_id}' not found.") + + workflow = server_config.active_workflows[workflow_id] + task = server_config.active_workflows.get(workflow_id + "_task") + + status = { + "id": workflow_id, + "name": workflow.name, + "running": task is not None and not task.done() if task else False, + "state": workflow.state.model_dump() if hasattr(workflow, "state") else {}, + "available_actions": ["pause", "resume", "cancel"] + if task and not task.done() + else [], + "tool_endpoints": [ + f"workflows/{workflow.name}/get_status", + ], + } + + # Add appropriate action endpoints based on status + if task and not task.done(): + status["tool_endpoints"].extend( + [ + f"workflows/{workflow.name}/pause", + f"workflows/{workflow.name}/resume", + f"workflows/{workflow.name}/cancel", + ] + ) + + if task and task.done(): + try: + result = task.result() + + # Convert result to a useful format + if hasattr(result, "model_dump"): + result_data = result.model_dump() + elif hasattr(result, "__dict__"): + result_data = result.__dict__ + else: + result_data = str(result) + + status["result"] = result_data + status["completed"] = True + status["error"] = None + except Exception as e: + status["result"] = None + status["completed"] = False + status["error"] = str(e) + status["exception_type"] = type(e).__name__ + + return status + + @mcp.tool(name="workflows/pause") + async def pause_workflow(ctx: MCPContext, workflow_id: str) -> bool: + """ + Pause a running workflow. + + Args: + workflow_id: The ID of the workflow to pause. + + Returns: + True if the workflow was paused, False otherwise. + """ + server_config: ServerContext = ctx.request_context.lifespan_context + + if workflow_id not in server_config.active_workflows: + raise ValueError(f"Workflow with ID '{workflow_id}' not found.") + + _workflow = server_config.active_workflows[workflow_id] + + # Signal the workflow to pause + try: + await server_config.context.app.executor.signal( + "pause", workflow_id=workflow_id + ) + return True + except Exception as e: + logger.error(f"Error pausing workflow {workflow_id}: {e}") + return False + + @mcp.tool(name="workflows/resume") + async def resume_workflow( + ctx: MCPContext, workflow_id: str, input_data: Optional[str] = None + ) -> bool: + """ + Resume a paused workflow. + + Args: + workflow_id: The ID of the workflow to resume. + input_data: Optional input data to provide to the workflow. + + Returns: + True if the workflow was resumed, False otherwise. + """ + server_config: ServerContext = ctx.request_context.lifespan_context + + if workflow_id not in server_config.active_workflows: + raise ValueError(f"Workflow with ID '{workflow_id}' not found.") + + # Signal the workflow to resume + try: + signal = Signal(name="resume", workflow_id=workflow_id, payload=input_data) + await server_config.context.app.executor.signal_bus.signal(signal) + return True + except Exception as e: + logger.error(f"Error resuming workflow {workflow_id}: {e}") + return False + + @mcp.tool(name="workflows/cancel") + async def cancel_workflow(ctx: MCPContext, workflow_id: str) -> bool: + """ + Cancel a running workflow. + + Args: + workflow_id: The ID of the workflow to cancel. + + Returns: + True if the workflow was cancelled, False otherwise. 
+ """ + server_config: ServerContext = ctx.request_context.lifespan_context + + if workflow_id not in server_config.active_workflows: + raise ValueError(f"Workflow with ID '{workflow_id}' not found.") + + task = server_config.active_workflows.get(workflow_id + "_task") + + if task and not task.done(): + # Cancel the task + task.cancel() + + # Signal the workflow to cancel + try: + await server_config.context.app.executor.signal( + "cancel", workflow_id=workflow_id + ) + + # Remove from active workflows + server_config.active_workflows.pop(workflow_id, None) + server_config.active_workflows.pop(workflow_id + "_task", None) + + return True + except Exception as e: + logger.error(f"Error cancelling workflow {workflow_id}: {e}") + return False + + return False + + @mcp.tool(name="workflow_signal/wait_for_signal") + async def wait_for_signal( + ctx: MCPContext, + signal_name: str, + workflow_id: str = None, + description: str = None, + timeout_seconds: int = None, + ) -> Dict[str, Any]: + """ + Provides information about a signal that a workflow is waiting for. + + This tool doesn't actually make the workflow wait (that's handled internally), + but it provides information about what signal is being waited for and how to + respond to it. + + Args: + signal_name: The name of the signal to wait for. + workflow_id: Optional workflow ID to associate with the signal. + description: Optional description of what the signal is for. + timeout_seconds: Optional timeout in seconds. + + Returns: + Information about the signal and how to respond to it. + """ + _server_context: ServerContext = ctx.request_context.lifespan_context + + # Inform about how to send the signal + return { + "signal_name": signal_name, + "workflow_id": workflow_id, + "description": description or f"Waiting for signal '{signal_name}'", + "status": "waiting_for_signal", + "timeout_seconds": timeout_seconds, + "instructions": "To respond to this signal, use the workflow_signal/send tool with the same signal_name and workflow_id.", + "related_tools": ["workflow_signal/send"], + } + + @mcp.tool(name="workflow_signal/send") + async def send_signal( + ctx: MCPContext, + signal_name: str, + workflow_id: str = None, + payload: Any = None, + ) -> Dict[str, bool]: + """ + Send a signal to a workflow. + + This can be used to respond to a workflow that is waiting for input or + to send a signal to control workflow execution. + + Args: + signal_name: The name of the signal to send. + workflow_id: Optional workflow ID to associate with the signal. + payload: Optional data to include with the signal. + + Returns: + Confirmation that the signal was sent. + """ + server_config: ServerContext = ctx.request_context.lifespan_context + executor = server_config.context.app.executor + + # Create and send the signal + signal = Signal(name=signal_name, workflow_id=workflow_id, payload=payload) + + try: + await executor.signal_bus.signal(signal) + return { + "success": True, + "message": f"Signal '{signal_name}' sent successfully", + } + except Exception as e: + logger.error(f"Error sending signal {signal_name}: {e}") + return {"success": False, "message": f"Error sending signal: {str(e)}"} + + @mcp.tool(name="workflows/wait_for_input") + async def workflow_wait_for_input( + ctx: MCPContext, workflow_id: str, description: str = "Provide input" + ) -> Dict[str, Any]: + """ + Get information about a workflow that is waiting for human input. 
+ + This tool helps coordinate when a workflow is waiting for human input by + providing clear instructions on how to provide that input. + + Args: + workflow_id: The ID of the workflow. + description: Description of what input is needed. + + Returns: + Instructions on how to provide input to the waiting workflow. + """ + server_config: ServerContext = ctx.request_context.lifespan_context + + if workflow_id not in server_config.active_workflows: + raise ValueError(f"Workflow with ID '{workflow_id}' not found.") + + workflow = server_config.active_workflows[workflow_id] + + # Provide more helpful information about how to send the input + return { + "workflow_id": workflow_id, + "workflow_name": workflow.name, + "description": description, + "status": "waiting_for_input", + "instructions": "To provide input, use workflows/resume with the workflow_id and input_data parameters.", + "example": { + "tool": "workflows/resume", + "args": { + "workflow_id": workflow_id, + "input_data": "Example input data", + }, + }, + "tool_endpoints": [f"workflows/{workflow.name}/resume"], + } + + # endregion + + return mcp + + +# region per-Agent Tools + + +def create_agent_tools(mcp: FastMCP, server_context: ServerContext): + """ + Create agent-specific tools for existing agents. + This is called at server start to register specific endpoints for each agent. + """ + if not server_context: + logger.warning("Server config not available for creating agent tools") + return + + for _, agent in server_context.active_agents.items(): + create_agent_specific_tools(mcp, server_context, agent) + + +def create_agent_specific_tools( + mcp: FastMCP, server_context: ServerContext, agent: Agent +): + """Create specific tools for a given agent.""" + + # Format instruction - handle callable instructions + instruction = agent.instruction + if callable(instruction): + instruction = instruction({}) + + server_registry = server_context.context.server_registry + + # Add generate* tools for this agent + @mcp.tool( + name=f"agents/{agent.name}/generate", + description=f""" + Run the '{agent.name}' agent using the given message. + This is similar to generating an LLM completion. + + Agent Description: {instruction} + Connected Servers: {_get_server_descriptions_as_string(server_registry, agent.server_names)} + + Args: + message: The prompt to send to the agent. + request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + + Returns: + The generated response from the agent. + """, + ) + async def generate( + ctx: MCPContext, + message: str | MCPMessageParam | List[MCPMessageParam], + request_params: RequestParams | None = None, + ) -> List[MCPMessageResult]: + return await _agent_generate(ctx, agent.name, message, request_params) + + @mcp.tool( + name=f"agents/{agent.name}/generate_str", + description=f""" + Run the '{agent.name}' agent using the given message and return the response as a string. + Use agents/{agent.name}/generate for results in the original format, and + use agents/{agent.name}/generate_structured for results conforming to a specific schema. + + Agent Description: {instruction} + Connected Servers: {_get_server_descriptions_as_string(server_registry, agent.server_names)} + + Args: + message: The prompt to send to the agent. + request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + + Returns: + The generated response from the agent. 
+ """, + ) + async def generate_str( + ctx: MCPContext, + message: str | MCPMessageParam | List[MCPMessageParam], + request_params: RequestParams | None = None, + ) -> str: + return await _agent_generate_str(ctx, agent.name, message, request_params) + + # Add structured generation tool for this agent + @mcp.tool( + name=f"agents/{agent.name}/generate_structured", + description=f""" + Run the '{agent.name}' agent using the given message and return a response that matches the given schema. + + Use agents/{agent.name}/generate for results in the original format, and + use agents/{agent.name}/generate_str for string result. + + Agent Description: {instruction} + Connected Servers: {_get_server_descriptions_as_string(server_registry, agent.server_names)} + + Args: + message: The prompt to send to the agent. + response_schema: The JSON schema that defines the shape to generate the response in. + This schema can be generated using type.schema_json() for a Pydantic model. + request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + + Returns: + A dictionary representation of the structured response. + + Example: + response_schema: + {{ + "title": "UserProfile", + "type": "object", + "properties": {{ + "name": {{ + "title": "Name", + "type": "string" + }}, + "age": {{ + "title": "Age", + "type": "integer", + "minimum": 0 + }}, + "email": {{ + "title": "Email", + "type": "string", + "format": "email", + "pattern": "^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$" + }} + }}, + "required": [ + "name", + "age", + "email" + ] + }} + """, + ) + async def generate_structured( + ctx: MCPContext, + message: str, + response_schema: Dict[str, Any], + request_params: RequestParams | None = None, + ) -> Dict[str, Any]: + return await _agent_generate_structured( + ctx, agent.name, message, response_schema, request_params + ) + + +# endregion + +# region per-Workflow Tools + + +def create_workflow_tools(mcp: FastMCP, server_config: ServerContext): + """ + Create workflow-specific tools for registered workflows. + This is called at server start to register specific endpoints for each workflow. 
+ """ + if not server_config: + logger.warning("Server config not available for creating workflow tools") + return + + for workflow_id, workflow_cls in server_config.context.app.workflows.items(): + create_workflow_specific_tools(mcp, workflow_id, workflow_cls) + + +def create_workflow_specific_tools(mcp: FastMCP, workflow_id: str, workflow_cls: Type): + """Create specific tools for a given workflow.""" + + # Get workflow documentation + doc = workflow_cls.__doc__ or "No description available" + doc = doc.strip() + + # Get workflow run method parameters using inspection + parameters = {} + if hasattr(workflow_cls, "run"): + sig = inspect.signature(workflow_cls.run) + for param_name, param in sig.parameters.items(): + if param_name != "self": + param_info = { + "type": str(param.annotation) + .replace("", ""), + "required": param.default == inspect.Parameter.empty, + } + if param.default != inspect.Parameter.empty: + param_info["default"] = param.default + parameters[param_name] = param_info + + # Create a run tool for this workflow + @mcp.tool(name=f"workflows/{workflow_id}/run") + async def workflow_specific_run( + ctx: MCPContext, + args: Optional[Dict[str, Any]] = None, + ) -> Dict[str, Any]: + """Run the workflow with the given arguments.""" + server_config: ServerContext = ctx.request_context.lifespan_context + app = server_config.context.app + + if workflow_id not in app.workflows: + raise ValueError(f"Workflow '{workflow_id}' not found.") + + # Create workflow instance + workflow = workflow_cls(executor=app.executor, name=workflow_id) + + # Generate workflow instance ID + instance_id = str(uuid.uuid4()) + + # Store workflow instance + server_config.active_workflows[instance_id] = workflow + + # Run workflow in separate task + run_args = args or {} + run_task = asyncio.create_task(workflow.run(**run_args)) + + # Store task + server_config.active_workflows[instance_id + "_task"] = run_task + + # Return information about the workflow + return { + "workflow_id": instance_id, + "workflow_name": workflow_id, + "status": "running", + "args": args, + "tool_endpoints": [ + f"workflows/{workflow_id}/get_status", + f"workflows/{workflow_id}/pause", + f"workflows/{workflow_id}/resume", + f"workflows/{workflow_id}/cancel", + ], + "message": f"Workflow {workflow_id} started with ID {instance_id}. Use the returned workflow_id with other workflow tools.", + } + + # Format parameter documentation + param_docs = [] + for param_name, param_info in parameters.items(): + default_info = ( + f" (default: {param_info.get('default', 'required')})" + if not param_info.get("required", True) + else "" + ) + param_docs.append( + f"- {param_name}: {param_info.get('type', 'Any')}{default_info}" + ) + + param_doc_str = "\n".join(param_docs) if param_docs else "- No parameters required" + + # Update the docstring + workflow_specific_run.__doc__ = f""" + Run the {workflow_id} workflow. + + Description: {doc} + + Parameters: + {param_doc_str} + + Args: + args: Dictionary containing the parameters for the workflow. + + Returns: + Information about the running workflow including its ID and metadata. 
+ """ + + # Create a status tool for this workflow + @mcp.tool(name=f"workflows/{workflow_id}/get_status") + def workflow_specific_status( + ctx: MCPContext, workflow_instance_id: str + ) -> Dict[str, Any]: + """Get the status of a running workflow instance.""" + server_config: ServerContext = ctx.request_context.lifespan_context + + if workflow_instance_id not in server_config.active_workflows: + raise ValueError(f"Workflow instance '{workflow_instance_id}' not found.") + + workflow = server_config.active_workflows[workflow_instance_id] + if workflow_id != workflow.name: + raise ValueError( + f"Workflow instance '{workflow_instance_id}' is not a {workflow_id} workflow." + ) + + task = server_config.active_workflows.get(workflow_instance_id + "_task") + + status = { + "id": workflow_instance_id, + "name": workflow.name, + "running": task is not None and not task.done() if task else False, + "state": workflow.state.model_dump() if hasattr(workflow, "state") else {}, + "available_actions": ["pause", "resume", "cancel"] + if task and not task.done() + else [], + "tool_endpoints": [ + f"workflows/{workflow_id}/get_status", + ], + } + + # Add appropriate action endpoints based on status + if task and not task.done(): + status["tool_endpoints"].extend( + [ + f"workflows/{workflow_id}/pause", + f"workflows/{workflow_id}/resume", + f"workflows/{workflow_id}/cancel", + ] + ) + + if task and task.done(): + try: + result = task.result() + + # Convert result to a useful format + if hasattr(result, "model_dump"): + result_data = result.model_dump() + elif hasattr(result, "__dict__"): + result_data = result.__dict__ + else: + result_data = str(result) + + status["result"] = result_data + status["completed"] = True + status["error"] = None + except Exception as e: + status["result"] = None + status["completed"] = False + status["error"] = str(e) + status["exception_type"] = type(e).__name__ + + return status + + # Update the docstring + workflow_specific_status.__doc__ = f""" + Get the status of a running {workflow_id} workflow instance. + + Description: {doc} + + Args: + workflow_instance_id: The ID of the workflow instance to check. + + Returns: + A dictionary with detailed information about the workflow status. + """ + + # Create a pause tool for this workflow + @mcp.tool(name=f"workflows/{workflow_id}/pause") + async def workflow_specific_pause( + ctx: MCPContext, workflow_instance_id: str + ) -> bool: + """Pause a running workflow instance.""" + server_config: ServerContext = ctx.request_context.lifespan_context + + if workflow_instance_id not in server_config.active_workflows: + raise ValueError(f"Workflow instance '{workflow_instance_id}' not found.") + + workflow = server_config.active_workflows[workflow_instance_id] + if workflow_id != workflow.name: + raise ValueError( + f"Workflow instance '{workflow_instance_id}' is not a {workflow_id} workflow." + ) + + # Signal workflow to pause + try: + await server_config.context.app.executor.signal( + "pause", workflow_id=workflow_instance_id + ) + return True + except Exception as e: + logger.error(f"Error pausing workflow {workflow_instance_id}: {e}") + return False + + # Update the docstring + workflow_specific_pause.__doc__ = f""" + Pause a running {workflow_id} workflow instance. + + Description: {doc} + + Args: + workflow_instance_id: The ID of the workflow instance to pause. + + Returns: + True if the workflow was paused, False otherwise. 
+ """ + + # Create a resume tool for this workflow + @mcp.tool(name=f"workflows/{workflow_id}/resume") + async def workflow_specific_resume( + ctx: MCPContext, workflow_instance_id: str, input_data: Optional[str] = None + ) -> bool: + """Resume a paused workflow instance.""" + server_config: ServerContext = ctx.request_context.lifespan_context + + if workflow_instance_id not in server_config.active_workflows: + raise ValueError(f"Workflow instance '{workflow_instance_id}' not found.") + + workflow = server_config.active_workflows[workflow_instance_id] + if workflow_id != workflow.name: + raise ValueError( + f"Workflow instance '{workflow_instance_id}' is not a {workflow_id} workflow." + ) + + # Signal workflow to resume + try: + signal = Signal( + name="resume", workflow_id=workflow_instance_id, payload=input_data + ) + await server_config.context.app.executor.signal_bus.signal(signal) + return True + except Exception as e: + logger.error(f"Error resuming workflow {workflow_instance_id}: {e}") + return False + + # Update the docstring + workflow_specific_resume.__doc__ = f""" + Resume a paused {workflow_id} workflow instance. + + Description: {doc} + + Args: + workflow_instance_id: The ID of the workflow instance to resume. + input_data: Optional input data to provide to the workflow. + + Returns: + True if the workflow was resumed, False otherwise. + """ + + # Create a cancel tool for this workflow + @mcp.tool(name=f"workflows/{workflow_id}/cancel") + async def workflow_specific_cancel( + ctx: MCPContext, workflow_instance_id: str + ) -> bool: + """Cancel a running workflow instance.""" + server_config: ServerContext = ctx.request_context.lifespan_context + + if workflow_instance_id not in server_config.active_workflows: + raise ValueError(f"Workflow instance '{workflow_instance_id}' not found.") + + workflow = server_config.active_workflows[workflow_instance_id] + if workflow_id != workflow.name: + raise ValueError( + f"Workflow instance '{workflow_instance_id}' is not a {workflow_id} workflow." + ) + + task = server_config.active_workflows.get(workflow_instance_id + "_task") + + if task and not task.done(): + # Cancel task + task.cancel() + + # Signal workflow to cancel + try: + await server_config.context.app.executor.signal( + "cancel", workflow_id=workflow_instance_id + ) + + # Remove from active workflows + server_config.active_workflows.pop(workflow_instance_id, None) + server_config.active_workflows.pop(workflow_instance_id + "_task", None) + + return True + except Exception as e: + logger.error(f"Error cancelling workflow {workflow_instance_id}: {e}") + return False + + return False + + # Update the docstring + workflow_specific_cancel.__doc__ = f""" + Cancel a running {workflow_id} workflow instance. + + Description: {doc} + + Args: + workflow_instance_id: The ID of the workflow instance to cancel. + + Returns: + True if the workflow was cancelled, False otherwise. 
+ """ + + +# endregion + + +def _get_server_descriptions( + server_registry: ServerRegistry | None, server_names: List[str] +) -> List: + servers: List[dict[str, str]] = [] + if server_registry: + for server_name in server_names: + config = server_registry.get_server_config(server_name) + if config: + servers.append( + { + "name": config.name, + "description": config.description, + } + ) + else: + servers.append({"name": server_name}) + else: + servers = [{"name": server_name} for server_name in server_names] + + return servers + + +def _get_server_descriptions_as_string( + server_registry: ServerRegistry | None, server_names: List[str] +) -> str: + servers = _get_server_descriptions(server_registry, server_names) + + # Format each server's information as a string + server_strings = [] + for server in servers: + if "description" in server: + server_strings.append(f"{server['name']}: {server['description']}") + else: + server_strings.append(f"{server['name']}") + + # Join all server strings with a newline + return "\n".join(server_strings) + + +async def _agent_generate( + ctx: MCPContext, + agent_name: str, + message: str | MCPMessageParam | List[MCPMessageParam], + request_params: RequestParams | None = None, +) -> List[MCPMessageResult]: + """ + Run an agent using the given message. + This is similar to generating an LLM completion. + + Args: + agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. + message: The prompt to send to the agent. + request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + + Returns: + The generated response from the agent. + """ + server_context: ServerContext = ctx.request_context.lifespan_context + + if agent_name not in server_context.active_agents: + raise ToolError(f"Agent not found: {agent_name}. Make sure the agent ") + + agent = server_context.active_agents[agent_name] + if not agent: + raise ToolError(f"Agent not found: {agent_name}") + elif not agent.llm: + raise ToolError( + f"Agent {agent_name} does not have an LLM attached. Make sure to call the attach_llm method where the agent is created." + ) + + # Convert the input message to the appropriate format + input_message: str | MessageParamT | List[MessageParamT] + if isinstance(message, str): + input_message = message + elif isinstance(message, list): + input_message = [agent.llm.from_mcp_message_param(msg) for msg in message] + else: + input_message = agent.llm.from_mcp_message_param(message) + + # Check if the agent is already initialized + async with agent: + result = await agent.llm.generate( + message=input_message, request_params=request_params + ) + return result + + +async def _agent_generate_str( + ctx: MCPContext, + agent_name: str, + message: str | MCPMessageParam | List[MCPMessageParam], + request_params: RequestParams | None = None, +) -> str: + """ + Run an agent using the given message and return the response as a string. + Use agents/generate for results in the original format, and + use agents/generate_structured for results conforming to a specific schema. + + Args: + agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. + message: The prompt to send to the agent. + request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + + Returns: + The generated response from the agent. 
+ """ + server_context: ServerContext = ctx.request_context.lifespan_context + + if agent_name not in server_context.active_agents: + raise ToolError(f"Agent not found: {agent_name}. Make sure the agent ") + + agent = server_context.active_agents[agent_name] + if not agent: + raise ToolError(f"Agent not found: {agent_name}") + elif not agent.llm: + raise ToolError( + f"Agent {agent_name} does not have an LLM attached. Make sure to call the attach_llm method where the agent is created." + ) + + # Convert the input message to the appropriate format + input_message: str | MessageParamT | List[MessageParamT] + if isinstance(message, str): + input_message = message + elif isinstance(message, list): + input_message = [agent.llm.from_mcp_message_param(msg) for msg in message] + else: + input_message = agent.llm.from_mcp_message_param(message) + + # Check if the agent is already initialized + async with agent: + result = await agent.llm.generate_str( + message=input_message, request_params=request_params + ) + return result + + +async def _agent_generate_structured( + ctx: MCPContext, + agent_name: str, + message: str | MCPMessageParam | List[MCPMessageParam], + response_schema: Dict[str, Any], + request_params: RequestParams | None = None, +) -> Dict[str, Any]: + """ + Generate a structured response from an agent that matches the given schema. + + Args: + agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. + message: The prompt to send to the agent. + response_schema: The JSON schema that defines the shape to generate the response in. + This schema can be generated using type.schema_json() for a Pydantic model. + request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + + Returns: + A dictionary representation of the structured response. + + Example: + response_schema: + { + "title": "UserProfile", + "type": "object", + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "age": { + "title": "Age", + "type": "integer", + "minimum": 0 + }, + "email": { + "title": "Email", + "type": "string", + "format": "email", + "pattern": "^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$" + } + }, + "required": [ + "name", + "age", + "email" + ] + } + """ + server_context: ServerContext = ctx.request_context.lifespan_context + + if agent_name not in server_context.active_agents: + raise ToolError(f"Agent not found: {agent_name}. Make sure the agent ") + + agent = server_context.active_agents[agent_name] + if not agent: + raise ToolError(f"Agent not found: {agent_name}") + elif not agent.llm: + raise ToolError( + f"Agent {agent_name} does not have an LLM attached. Make sure to call the attach_llm method where the agent is created." 
+    )
+
+    # Convert the input message to the appropriate format
+    input_message: str | MessageParamT | List[MessageParamT]
+    if isinstance(message, str):
+        input_message = message
+    elif isinstance(message, list):
+        input_message = [agent.llm.from_mcp_message_param(msg) for msg in message]
+    else:
+        input_message = agent.llm.from_mcp_message_param(message)
+
+    response_model = create_model_from_schema(response_schema)
+
+    # Check if the agent is already initialized
+    async with agent:
+        result = await agent.llm.generate_structured(
+            message=input_message,
+            response_model=response_model,
+            request_params=request_params,
+        )
+        # Convert to dictionary for JSON serialization
+        return result.model_dump(mode="json")
diff --git a/src/mcp_agent/app_server_types.py b/src/mcp_agent/app_server_types.py
new file mode 100644
index 000000000..339e636aa
--- /dev/null
+++ b/src/mcp_agent/app_server_types.py
@@ -0,0 +1,49 @@
+from typing import Any, Dict, List, Optional, Type
+from pydantic import BaseModel, Field, create_model
+# from pydantic.json_schema import model_from_schema
+
+from mcp.types import (
+    CreateMessageResult,
+    SamplingMessage,
+)
+
+MCPMessageParam = SamplingMessage
+MCPMessageResult = CreateMessageResult
+
+
+def create_model_from_schema(json_schema: Dict[str, Any]) -> Type[BaseModel]:
+    """Create a Pydantic model from a JSON schema"""
+    model_name = json_schema.get("title", "DynamicModel")
+    properties = json_schema.get("properties", {})
+    required = json_schema.get("required", [])
+
+    field_definitions = {}
+
+    for field_name, field_schema in properties.items():
+        # Get field type
+        field_type = str  # Default to string
+        schema_type = field_schema.get("type")
+
+        if schema_type == "integer":
+            field_type = int
+        elif schema_type == "number":
+            field_type = float
+        elif schema_type == "boolean":
+            field_type = bool
+        elif schema_type == "array":
+            field_type = List[Any]
+        elif schema_type == "object":
+            field_type = Dict[str, Any]
+
+        # Handle optional fields
+        if field_name not in required:
+            field_type = Optional[field_type]
+
+        # Create field info; optional fields get a None default so the model does not require them
+        field_info = {} if field_name in required else {"default": None}
+        if "description" in field_schema:
+            field_info["description"] = field_schema["description"]
+
+        field_definitions[field_name] = (field_type, Field(**field_info))
+
+    return create_model(model_name, **field_definitions)
diff --git a/src/mcp_agent/config.py b/src/mcp_agent/config.py
index d3de28388..098af8705 100644
--- a/src/mcp_agent/config.py
+++ b/src/mcp_agent/config.py
@@ -78,8 +78,7 @@ class MCPServerSettings(BaseModel):
     env: Dict[str, str] | None = None
     """Environment variables to pass to the server process."""

-    env: Dict[str, str] | None = None
-    """Environment variables to pass to the server process."""
+    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)


 class MCPSettings(BaseModel):
diff --git a/src/mcp_agent/context.py b/src/mcp_agent/context.py
index 033ee79c2..4c22c9f1a 100644
--- a/src/mcp_agent/context.py
+++ b/src/mcp_agent/context.py
@@ -40,10 +40,12 @@
 if TYPE_CHECKING:
     from mcp_agent.human_input.types import HumanInputCallback
     from mcp_agent.executor.workflow_signal import SignalWaitCallback
+    from mcp_agent.app import MCPApp
 else:
     # Runtime placeholders for the types
     HumanInputCallback = Any
     SignalWaitCallback = Any
+    MCPApp = Any

 logger = get_logger(__name__)

@@ -61,6 +63,7 @@ class Context(BaseModel):
     upstream_session: Optional[ServerSession] = None  # TODO: saqadri - figure this out
     model_selector: Optional[ModelSelector] = None
     session_id: str | None = None
+    app: Optional["MCPApp"] = None

     # Registries
     server_registry: Optional[ServerRegistry] = None
diff --git a/src/mcp_agent/executor/workflow.py b/src/mcp_agent/executor/workflow.py
index c00e16370..0010841a3 100644
--- a/src/mcp_agent/executor/workflow.py
+++ b/src/mcp_agent/executor/workflow.py
@@ -11,6 +11,7 @@
 from pydantic import BaseModel, ConfigDict, Field

 from mcp_agent.executor.executor import Executor
+from mcp_agent.logging.logger import get_logger

 T = TypeVar("T")

@@ -47,6 +48,10 @@ class Workflow(ABC, Generic[T]):
     """
     Base class for user-defined workflows.
     Handles execution and state management.
+
+    Workflows represent user-defined application logic modules that can use Agents and AugmentedLLMs.
+    Typically, workflows are registered with an MCPApp and can be exposed as MCP tools via app_server.py.
+
     Some key notes:
         - To enable the executor engine to recognize and orchestrate the workflow,
         - the class MUST be decorated with @workflow.
@@ -66,24 +71,32 @@ def __init__(
         self.executor = executor
         self.name = name or self.__class__.__name__
         self.init_kwargs = kwargs
-        # TODO: handle logging
-        # self._logger = logging.getLogger(self.name)
+        self._logger = get_logger(f"workflow.{self.name}")

         # A simple workflow state object
         # If under Temporal, storing it as a field on this class
         # means it can be replayed automatically
-        self.state = WorkflowState(name=name, metadata=metadata or {})
+        self.state = WorkflowState(metadata=metadata or {})

     @abstractmethod
     async def run(self, *args: Any, **kwargs: Any) -> "WorkflowResult[T]":
         """
         Main workflow implementation. Must be overridden by subclasses.
+
+        This is where the user-defined application logic goes. Typically, this involves:
+        1. Setting up Agents and attaching LLMs to them
+        2. Executing operations using the Agents and their LLMs
+        3. Processing results and returning them
+
+        Returns:
+            WorkflowResult containing the output of the workflow
         """

     async def update_state(self, **kwargs):
         """Syntactic sugar to update workflow state."""
         for key, value in kwargs.items():
-            self.state[key] = value
+            if hasattr(self.state, "__getitem__"):
+                self.state[key] = value
+            setattr(self.state, key, value)

         self.state.updated_at = datetime.utcnow().timestamp()

@@ -97,6 +110,30 @@ async def wait_for_input(self, description: str = "Provide input") -> str:
             "human_input", description=description
         )

+    async def initialize(self):
+        """
+        Optional initialization method that will be called before run.
+        Override this to set up any resources needed by the workflow.
+        """
+        self.state.status = "initializing"
+        self._logger.debug(f"Initializing workflow {self.name}")
+
+    async def cleanup(self):
+        """
+        Optional cleanup method that will be called after run.
+        Override this to clean up any resources used by the workflow.
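+
+        Note: initialize() and cleanup() are invoked automatically when the
+        workflow is used as an async context manager (see __aenter__/__aexit__
+        below).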
+ """ + self._logger.debug(f"Cleaning up workflow {self.name}") + + async def __aenter__(self): + """Support for async context manager pattern.""" + await self.initialize() + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + """Support for async context manager pattern.""" + await self.cleanup() + # ############################ # # Example: DocumentWorkflow diff --git a/src/mcp_agent/mcp/mcp_agent_server.py b/src/mcp_agent/mcp/mcp_agent_server.py deleted file mode 100644 index 0c4d3e388..000000000 --- a/src/mcp_agent/mcp/mcp_agent_server.py +++ /dev/null @@ -1,56 +0,0 @@ -import asyncio -from mcp.server import NotificationOptions -from mcp.server.fastmcp import FastMCP -from mcp.server.stdio import stdio_server -from mcp_agent.executor.temporal import get_temporal_client -from mcp_agent.telemetry.tracing import setup_tracing - -app = FastMCP("mcp-agent-server") - -setup_tracing("mcp-agent-server") - - -async def run(): - async with stdio_server() as (read_stream, write_stream): - await app._mcp_server.run( - read_stream, - write_stream, - app._mcp_server.create_initialization_options( - notification_options=NotificationOptions( - tools_changed=True, resources_changed=True - ) - ), - ) - - -@app.tool -async def run_workflow(query: str): - """Run the workflow given its name or id""" - pass - - -@app.tool -async def pause_workflow(workflow_id: str): - """Pause a running workflow.""" - temporal_client = await get_temporal_client() - handle = temporal_client.get_workflow_handle(workflow_id) - await handle.signal("pause") - - -@app.tool -async def resume_workflow(workflow_id: str): - """Resume a paused workflow.""" - temporal_client = await get_temporal_client() - handle = temporal_client.get_workflow_handle(workflow_id) - await handle.signal("resume") - - -async def provide_user_input(workflow_id: str, input_data: str): - """Provide user/human input to a waiting workflow step.""" - temporal_client = await get_temporal_client() - handle = temporal_client.get_workflow_handle(workflow_id) - await handle.signal("human_input", input_data) - - -if __name__ == "__main__": - asyncio.run(run()) diff --git a/src/mcp_agent/workflows/llm/augmented_llm_openai.py b/src/mcp_agent/workflows/llm/augmented_llm_openai.py index 2c9e78f38..c3a703997 100644 --- a/src/mcp_agent/workflows/llm/augmented_llm_openai.py +++ b/src/mcp_agent/workflows/llm/augmented_llm_openai.py @@ -199,7 +199,11 @@ async def generate(self, message, request_params: RequestParams | None = None): responses.append(message) # Fixes an issue with openai validation that does not allow non alphanumeric characters, dashes, and underscores - sanitized_name = re.sub(r"[^a-zA-Z0-9_-]", "_", self.name) if isinstance(self.name, str) else None + sanitized_name = ( + re.sub(r"[^a-zA-Z0-9_-]", "_", self.name) + if isinstance(self.name, str) + else None + ) converted_message = self.convert_message_to_message_param( message, name=sanitized_name From daf9b4fd2a42ee422d4dd0c16f2b17b5464b409b Mon Sep 17 00:00:00 2001 From: Sarmad Qadri Date: Wed, 2 Apr 2025 12:02:50 -0400 Subject: [PATCH 2/9] Some more WIP to get app server set up for workflows --- examples/workflow_mcp_server/README.md | 139 ++++++- examples/workflow_mcp_server/server.py | 219 +++++++++- src/mcp_agent/agents/agent_config.py | 224 +++++++++++ src/mcp_agent/app.py | 94 ++++- src/mcp_agent/app_server.py | 368 ++++++++++++++--- src/mcp_agent/executor/workflow.py | 46 ++- src/mcp_agent/fast_app.py | 532 +++++++++++++++++++++++++ 7 files changed, 1525 insertions(+), 97 
deletions(-) create mode 100644 src/mcp_agent/agents/agent_config.py create mode 100644 src/mcp_agent/fast_app.py diff --git a/examples/workflow_mcp_server/README.md b/examples/workflow_mcp_server/README.md index a70b67cdb..ede9352f4 100644 --- a/examples/workflow_mcp_server/README.md +++ b/examples/workflow_mcp_server/README.md @@ -1,34 +1,49 @@ # Workflow MCP Server Example -This example demonstrates how to: +This example demonstrates three approaches to creating agents and workflows: -1. Create custom workflows using the MCP Agent framework -2. Expose those workflows as MCP tools via an MCP server -3. Connect to the workflow MCP server from a client application +1. Traditional workflow-based approach with manual agent creation +2. Programmatic agent configuration using AgentConfig +3. Declarative agent configuration using FastMCPApp decorators + +All three approaches can use `app_server.py` to expose the agents and workflows as an MCP server. ## Concepts Demonstrated - Using the `Workflow` base class to create custom workflows - Registering workflows with an `MCPApp` -- Exposing workflows as MCP tools using `app_server.py` +- Creating and registering agent configurations with both programmatic and declarative approaches +- Exposing workflows and agents as MCP tools using `app_server.py` - Connecting to a workflow server using `gen_client` -- Running workflows and monitoring their progress +- Lazy instantiation of agents from configurations when their tools are called + +## Components in this Example -## Workflows in this Example +1. **DataProcessorWorkflow**: A traditional workflow that processes data in three steps: -1. **DataProcessorWorkflow**: A workflow that processes data in three steps: - Finding and retrieving content from a source (file or URL) - Analyzing the content - Formatting the results -2. **SummarizationWorkflow**: A workflow that summarizes text content: +2. **SummarizationWorkflow**: A traditional workflow that summarizes text content: + - Generates a concise summary - Extracts key points - Returns structured data +3. **Research Team**: A parallel workflow created using the agent configuration system: + + - Uses a fan-in/fan-out pattern with multiple specialized agents + - Demonstrates declarative workflow pattern configuration + +4. **Specialist Router**: A router workflow created using FastMCPApp decorators: + - Routes requests to specialized agents based on content + - Shows how to use the decorator syntax for workflow creation + ## How to Run 1. 
Copy the example secrets file: + ``` cp mcp_agent.secrets.yaml.example mcp_agent.secrets.yaml ``` @@ -49,19 +64,37 @@ This example demonstrates how to: ## Understanding the Code -### Workflow Definition +### Approach 1: Traditional Workflow Definition Workflows are defined by subclassing the `Workflow` base class and implementing: + - The `run` method containing the main workflow logic -- Optional `initialize` and `cleanup` methods for setup and teardown +- `initialize` and `cleanup` methods for setup and teardown +- Optionally a custom `create` class method for specialized instantiation ```python class DataProcessorWorkflow(Workflow[str]): + @classmethod + async def create(cls, executor: Executor, name: str | None = None, **kwargs: Any) -> "DataProcessorWorkflow": + # Custom instantiation logic + workflow = cls(executor=executor, name=name, **kwargs) + await workflow.initialize() + return workflow + + async def initialize(self): + # Set up resources like agents and LLMs + async def run(self, source: str, analysis_prompt: Optional[str] = None, output_format: Optional[str] = None) -> WorkflowResult[str]: # Workflow implementation... + + async def cleanup(self): + # Clean up resources ``` -### Registering a Workflow +The base `Workflow` class provides a default implementation of `create()` that handles basic initialization, but workflows can override this for specialized setup. Our example shows both approaches: + +1. `DataProcessorWorkflow` overrides the `create()` method to implement custom initialization +2. `SummarizationWorkflow` uses the default implementation from the base class Workflows are registered with the MCPApp using the `@app.workflow` decorator: @@ -73,13 +106,87 @@ class DataProcessorWorkflowRegistered(DataProcessorWorkflow): pass ``` -### Exposing Workflows as Tools +### Approach 2: Programmatic Agent Configuration + +Agent configurations can be created programmatically using Pydantic models: + +```python +# Create a basic agent configuration +research_agent_config = AgentConfig( + name="researcher", + instruction="You are a helpful research assistant that finds information and presents it clearly.", + server_names=["fetch", "filesystem"], + llm_config=AugmentedLLMConfig( + factory=OpenAIAugmentedLLM, + model="gpt-4o", + temperature=0.7 + ) +) + +# Create a parallel workflow configuration +research_team_config = AgentConfig( + name="research_team", + instruction="You are a research team that produces high-quality, accurate content.", + parallel_config=ParallelWorkflowConfig( + fan_in_agent="editor", + fan_out_agents=["summarizer", "fact_checker"], + concurrent=True + ) +) + +# Register the configurations with the app +app.register_agent_config(research_agent_config) +app.register_agent_config(research_team_config) +``` + +### Approach 3: Declarative Agent Configuration with FastMCPApp + +FastMCPApp provides decorators for creating agent configurations in a more declarative style: + +```python +fast_app = FastMCPApp(name="fast_workflow_mcp_server") + +# Basic agent with OpenAI LLM +@fast_app.agent("assistant", "You are a helpful assistant that answers questions concisely.", + server_names=["calculator"]) +def assistant_config(config): + config.llm_config = AugmentedLLMConfig( + factory=OpenAIAugmentedLLM, + model="gpt-4o", + temperature=0.7 + ) + return config + +# Router workflow with specialist agents +@fast_app.router("specialist_router", "You route requests to the appropriate specialist.", + agent_names=["mathematician", "programmer", "writer"]) +def 
router_config(config): + config.llm_config = AugmentedLLMConfig( + factory=OpenAIAugmentedLLM, + model="gpt-4o" + ) + config.router_config.top_k = 1 + return config +``` + +### Exposing Workflows and Agents as Tools + +The MCP server automatically exposes both workflows and agent configurations as tools: + +**Workflow tools**: -The MCP server automatically exposes workflows as tools, creating endpoints for: - Running a workflow: `workflows/{workflow_id}/run` - Checking status: `workflows/{workflow_id}/get_status` - Controlling workflow execution: `workflows/{workflow_id}/pause`, `workflows/{workflow_id}/resume`, `workflows/{workflow_id}/cancel` +**Agent tools**: + +- Running an agent: `agents/{agent_name}/generate` +- Getting string response: `agents/{agent_name}/generate_str` +- Getting structured response: `agents/{agent_name}/generate_structured` + +Agent configurations are lazily instantiated when their tools are called. If the agent is already active, the existing instance is reused. + ### Connecting to the Workflow Server The client connects to the workflow server using the `gen_client` function: @@ -87,4 +194,6 @@ The client connects to the workflow server using the `gen_client` function: ```python async with gen_client("workflow_server", context.server_registry) as server: # Connect and use the server -``` \ No newline at end of file +``` + +You can then call both workflow and agent tools through this client connection. diff --git a/examples/workflow_mcp_server/server.py b/examples/workflow_mcp_server/server.py index 5527e37c4..5bfdfae4a 100644 --- a/examples/workflow_mcp_server/server.py +++ b/examples/workflow_mcp_server/server.py @@ -1,13 +1,33 @@ +""" +Workflow MCP Server Example + +This example demonstrates three approaches to creating agents and workflows: +1. Traditional workflow-based approach with manual agent creation +2. Programmatic agent configuration using AgentConfig +3. Declarative agent configuration using FastMCPApp decorators +""" + import asyncio import os +import logging from typing import Dict, Any, Optional -from mcp.server.helpers.stdio import stdio_server -from mcp_agent.app import MCPApp +from mcp_agent.fast_app import FastMCPApp from mcp_agent.app_server import create_mcp_server_for_app from mcp_agent.agents.agent import Agent +from mcp_agent.agents.agent_config import ( + AgentConfig, + AugmentedLLMConfig, + ParallelWorkflowConfig, +) from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM +from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM from mcp_agent.executor.workflow import Workflow, WorkflowResult +from mcp_agent.executor.executor import Executor + +# Initialize logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) class DataProcessorWorkflow(Workflow[str]): @@ -16,6 +36,30 @@ class DataProcessorWorkflow(Workflow[str]): This workflow demonstrates how to use multiple agents to process data in a sequence. """ + @classmethod + async def create( + cls, executor: Executor, name: str | None = None, **kwargs: Any + ) -> "DataProcessorWorkflow": + """ + Factory method to create and initialize the DataProcessorWorkflow. + Demonstrates how to override the default create method for specialized initialization. 
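+
+        Illustrative usage (names assume the example app defined below):
+            workflow = await DataProcessorWorkflow.create(executor=app.executor)
+            result = await workflow.run(source="https://example.com")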
+ + Args: + executor: The executor to use + name: Optional workflow name + **kwargs: Additional parameters for customization + + Returns: + An initialized DataProcessorWorkflow instance + """ + # Create the workflow instance + workflow = cls(executor=executor, name=name or "data_processor", **kwargs) + + # Initialize it (which will set up agents, etc.) + await workflow.initialize() + + return workflow + async def initialize(self): await super().initialize() self.state.status = "ready" @@ -51,9 +95,9 @@ async def initialize(self): async def cleanup(self): # Clean up resources - await self.finder_agent.cleanup() - await self.analyzer_agent.cleanup() - await self.formatter_agent.cleanup() + await self.finder_agent.shutdown() + await self.analyzer_agent.shutdown() + await self.formatter_agent.shutdown() await super().cleanup() async def run( @@ -122,7 +166,9 @@ async def run( "output_format": format_instruction, "workflow_completed": True, }, - start_time=self.state.metadata.get("start_time"), + start_time=self.state.metadata.get( + "start_time" + ), # TODO: saqadri (MAC) - fix end_time=self.state.updated_at, ) @@ -133,6 +179,9 @@ class SummarizationWorkflow(Workflow[Dict[str, Any]]): """ A workflow that summarizes text content with customizable parameters. This workflow demonstrates how to create a simple summarization pipeline. + + This workflow uses the default create() implementation from the base Workflow class, + showing that it's not necessary to override create() in every workflow. """ async def initialize(self): @@ -152,7 +201,7 @@ async def initialize(self): self.summarizer_llm = await self.summarizer_agent.attach_llm(OpenAIAugmentedLLM) async def cleanup(self): - await self.summarizer_agent.cleanup() + await self.summarizer_agent.shutdown() await super().cleanup() async def run( @@ -223,35 +272,179 @@ async def run( return result -# Initialize the app -app = MCPApp(name="workflow_mcp_server") +# Create a single FastMCPApp instance (which extends MCPApp) +app = FastMCPApp(name="workflow_mcp_server") + +# ------------------------------------------------------------------------- +# Approach 1: Traditional workflow registration with @app.workflow decorator +# ------------------------------------------------------------------------- # Register workflows with the app @app.workflow class DataProcessorWorkflowRegistered(DataProcessorWorkflow): + """Data processing workflow registered with the app.""" + pass @app.workflow class SummarizationWorkflowRegistered(SummarizationWorkflow): + """Summarization workflow registered with the app.""" + pass +# ------------------------------------------------------------------------- +# Approach 2: Programmatic agent configuration with AgentConfig +# ------------------------------------------------------------------------- + +# Create a basic agent configuration +research_agent_config = AgentConfig( + name="researcher", + instruction="You are a helpful research assistant that finds information and presents it clearly.", + server_names=["fetch", "filesystem"], + llm_config=AugmentedLLMConfig( + factory=OpenAIAugmentedLLM, + model="gpt-4o", + temperature=0.7, + provider_params={"max_tokens": 2000}, + ), +) + +# Create component agents for a parallel workflow +programmatic_summarizer_config = AgentConfig( + name="programmatic_summarizer", + instruction="You are specialized in summarizing information clearly and concisely.", + server_names=["fetch"], + llm_config=AugmentedLLMConfig( + factory=AnthropicAugmentedLLM, model="claude-3-sonnet-20240229" + ), +) + 
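+# Illustrative note: these configurations are only registered with the app;
+# the agents are instantiated lazily when their tools are first called.
+# Server-side code could also materialize one directly, e.g.:
+#
+#     agent, llm = await app.create_agent("programmatic_summarizer")
+#     text = await llm.generate_str(message="Summarize this document")
+#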
+programmatic_fact_checker_config = AgentConfig( + name="programmatic_fact_checker", + instruction="You verify facts and identify potential inaccuracies in information.", + server_names=["fetch", "filesystem"], + llm_config=AugmentedLLMConfig(factory=OpenAIAugmentedLLM, model="gpt-4o"), +) + +programmatic_editor_config = AgentConfig( + name="programmatic_editor", + instruction="You refine and improve text, focusing on clarity and readability.", + server_names=[], + llm_config=AugmentedLLMConfig(factory=OpenAIAugmentedLLM, model="gpt-4o"), +) + +# Create a parallel workflow configuration +programmatic_research_team_config = AgentConfig( + name="programmatic_research_team", + instruction="You are a research team that produces high-quality, accurate content.", + server_names=["fetch", "filesystem"], + llm_config=AugmentedLLMConfig( + factory=AnthropicAugmentedLLM, model="claude-3-opus-20240229" + ), + parallel_config=ParallelWorkflowConfig( + fan_in_agent="programmatic_editor", + fan_out_agents=["programmatic_summarizer", "programmatic_fact_checker"], + concurrent=True, + ), +) + +# Register the configurations with the app using programmatic method +app.register_agent_config(research_agent_config) +app.register_agent_config(programmatic_summarizer_config) +app.register_agent_config(programmatic_fact_checker_config) +app.register_agent_config(programmatic_editor_config) +app.register_agent_config(programmatic_research_team_config) + +# ------------------------------------------------------------------------- +# Approach 3: Declarative agent configuration with FastMCPApp decorators +# ------------------------------------------------------------------------- + + +# Basic agent with OpenAI LLM +@app.agent( + "assistant", + "You are a helpful assistant that answers questions concisely.", + server_names=["calculator"], +) +def assistant_config(config): + # Configure the LLM to use + config.llm_config = AugmentedLLMConfig( + factory=OpenAIAugmentedLLM, model="gpt-4o", temperature=0.7 + ) + return config + + +# Component agents for router workflow +@app.agent( + "mathematician", + "You solve mathematical problems with precision.", + server_names=["calculator"], +) +def mathematician_config(config): + config.llm_config = AugmentedLLMConfig(factory=OpenAIAugmentedLLM, model="gpt-4o") + return config + + +@app.agent( + "programmer", + "You write and debug code in various programming languages.", + server_names=["filesystem"], +) +def programmer_config(config): + config.llm_config = AugmentedLLMConfig(factory=OpenAIAugmentedLLM, model="gpt-4o") + return config + + +@app.agent("writer", "You write creative and engaging content.", server_names=[]) +def writer_config(config): + config.llm_config = AugmentedLLMConfig( + factory=AnthropicAugmentedLLM, model="claude-3-sonnet-20240229" + ) + return config + + +# Router workflow using the decorator syntax +@app.router( + "specialist_router", + "You route requests to the appropriate specialist.", + agent_names=["mathematician", "programmer", "writer"], +) +def router_config(config): + config.llm_config = AugmentedLLMConfig(factory=OpenAIAugmentedLLM, model="gpt-4o") + # Configure top_k for the router + config.router_config.top_k = 1 + return config + + async def main(): # Initialize the app await app.initialize() # Add the current directory to the filesystem server's args if needed context = app.context - context.config.mcp.servers["filesystem"].args.extend([os.getcwd()]) + if "filesystem" in context.config.mcp.servers: + 
context.config.mcp.servers["filesystem"].args.extend([os.getcwd()]) + + # Log registered workflows and agent configurations + logger.info(f"Creating MCP server for {app.name}") + + logger.info("Registered workflows:") + for workflow_id in app.workflows: + logger.info(f" - {workflow_id}") + + logger.info("Registered agent configurations:") + for name, config in app.agent_configs.items(): + workflow_type = config.get_workflow_type() or "basic" + logger.info(f" - {name} ({workflow_type})") - # Create the MCP server + # Create the MCP server that exposes both workflows and agent configurations mcp_server = create_mcp_server_for_app(app) # Run the server - async with stdio_server() as (read_stream, write_stream): - await mcp_server.run(read_stream, write_stream) + await mcp_server.run_stdio_async() if __name__ == "__main__": diff --git a/src/mcp_agent/agents/agent_config.py b/src/mcp_agent/agents/agent_config.py new file mode 100644 index 000000000..dd5d20fb3 --- /dev/null +++ b/src/mcp_agent/agents/agent_config.py @@ -0,0 +1,224 @@ +""" +Agent configuration classes for declarative agent definition. +""" + +from typing import TYPE_CHECKING +from pydantic import BaseModel, Field, ConfigDict +from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union, Generic + +from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM, RequestParams +from mcp_agent.human_input.types import HumanInputCallback + +if TYPE_CHECKING: + from mcp_agent.agents.agent import Agent + +# Define TypeVar for LLM types +LLM = TypeVar("LLM", bound=AugmentedLLM) + + +class AugmentedLLMConfig(BaseModel, Generic[LLM]): + """ + Configuration for creating an AugmentedLLM instance. + Provides type-safe configuration for different LLM providers. + """ + + # The factory function or class that creates the LLM + factory: Callable[..., LLM] + + # Common parameters that apply to most LLMs + model: Optional[str] = None + temperature: Optional[float] = None + max_tokens: Optional[int] = None + + # Model-specific parameters + provider_params: Dict[str, Any] = Field(default_factory=dict) + + # Request parameters used in generate calls + default_request_params: Optional[RequestParams] = None + + model_config = ConfigDict(arbitrary_types_allowed=True) + + async def create_llm(self) -> LLM: + """ + Create an LLM instance using this configuration. + + Returns: + An instance of the configured LLM type + """ + # Combine common parameters with provider-specific parameters + params = {} + if self.model: + params["model"] = self.model + if self.temperature is not None: + params["temperature"] = self.temperature + if self.max_tokens is not None: + params["max_tokens"] = self.max_tokens + + # Add any additional provider-specific parameters + params.update(self.provider_params) + + # Create the LLM instance + return self.factory(**params) + + +class BasicAgentConfig(BaseModel): + """ + Configuration for a basic agent with an LLM. + This contains all the parameters needed to create a standard Agent + without any complex workflow pattern. + """ + + name: str + instruction: Union[str, Callable[[Dict], str]] = "You are a helpful agent." 
+ server_names: List[str] = Field(default_factory=list) + functions: List[Callable] = Field(default_factory=list) + connection_persistence: bool = True + human_input_callback: Optional[HumanInputCallback] = None + llm_config: Optional[AugmentedLLMConfig] = None + extra_kwargs: Dict[str, Any] = Field(default_factory=dict) + + model_config = ConfigDict(arbitrary_types_allowed=True) + + +# Type-safe configs for each workflow pattern + + +class ParallelWorkflowConfig(BaseModel): + """Type-safe configuration for ParallelLLM workflow pattern.""" + + fan_in_agent: str # Name of the agent to use for fan-in + fan_out_agents: List[str] # Names of agents to use for fan-out + concurrent: bool = True + synchronize_fan_out_models: bool = False + extra_params: Dict[str, Any] = Field(default_factory=dict) + + +class OrchestratorWorkflowConfig(BaseModel): + """Type-safe configuration for Orchestrator workflow pattern.""" + + available_agents: List[str] # Names of agents available to the orchestrator + max_iterations: int = 10 + planner_agent: Optional[str] = None # Optional custom planner agent + extra_params: Dict[str, Any] = Field(default_factory=dict) + + +class RouterWorkflowConfig(BaseModel): + """Type-safe configuration for Router workflow pattern.""" + + agent_names: List[str] # Names of agents to route between + top_k: int = 1 + router_type: Literal["llm", "embedding"] = "llm" + embedding_model: Optional[str] = None # For embedding-based router + extra_params: Dict[str, Any] = Field(default_factory=dict) + + +class EvaluatorOptimizerWorkflowConfig(BaseModel): + """Type-safe configuration for Evaluator-Optimizer workflow pattern.""" + + evaluator_agent: str # Name of the agent to use as evaluator + optimizer_agent: str # Name of the agent to use as optimizer + min_rating: str = "excellent" # Minimum quality rating to accept + max_iterations: int = 5 + extra_params: Dict[str, Any] = Field(default_factory=dict) + + +class SwarmWorkflowConfig(BaseModel): + """Type-safe configuration for Swarm workflow pattern.""" + + agents: List[str] # Names of agents in the swarm + context_variables: Dict[str, Any] = Field(default_factory=dict) + extra_params: Dict[str, Any] = Field(default_factory=dict) + + +class AgentConfig(BaseModel): + """ + Complete configuration for an agent, which can be basic or use a complex workflow pattern. + Only one workflow configuration should be set. + """ + + name: str + instruction: Union[str, Callable[[Dict], str]] = "You are a helpful agent." + server_names: List[str] = Field(default_factory=list) + functions: List[Callable] = Field(default_factory=list) + connection_persistence: bool = True + human_input_callback: Optional[HumanInputCallback] = None + + # LLM config for either basic agent or workflow LLM factory + llm_config: Optional[AugmentedLLMConfig] = None + + # Workflow configuration - only one should be set + parallel_config: Optional[ParallelWorkflowConfig] = None + orchestrator_config: Optional[OrchestratorWorkflowConfig] = None + router_config: Optional[RouterWorkflowConfig] = None + evaluator_optimizer_config: Optional[EvaluatorOptimizerWorkflowConfig] = None + swarm_config: Optional[SwarmWorkflowConfig] = None + + # Additional kwargs + extra_kwargs: Dict[str, Any] = Field(default_factory=dict) + + model_config = ConfigDict(arbitrary_types_allowed=True) + + def create_agent(self, context=None) -> "Agent": + """ + Create a basic agent instance. + This doesn't initialize the agent or attach an LLM. 
+ + Args: + context: Optional Context to pass to the Agent + + Returns: + Instantiated Agent object without initialization + """ + from mcp_agent.agents.agent import Agent + + return Agent( + name=self.name, + instruction=self.instruction, + server_names=self.server_names, + functions=self.functions, + connection_persistence=self.connection_persistence, + human_input_callback=self.human_input_callback, + context=context, + **self.extra_kwargs, + ) + + def get_workflow_type(self) -> Optional[str]: + """ + Get the type of workflow this agent uses, if any. + + Returns: + String identifier of workflow type or None for basic agents + """ + configs = [ + ("parallel", self.parallel_config), + ("orchestrator", self.orchestrator_config), + ("router", self.router_config), + ("evaluator_optimizer", self.evaluator_optimizer_config), + ("swarm", self.swarm_config), + ] + + for name, config in configs: + if config is not None: + return name + + return None + + def get_workflow_config(self) -> Optional[Any]: + """ + Get the workflow configuration object. + + Returns: + The appropriate workflow configuration object or None + """ + workflow_type = self.get_workflow_type() + if workflow_type == "parallel": + return self.parallel_config + elif workflow_type == "orchestrator": + return self.orchestrator_config + elif workflow_type == "router": + return self.router_config + elif workflow_type == "evaluator_optimizer": + return self.evaluator_optimizer_config + elif workflow_type == "swarm": + return self.swarm_config + return None diff --git a/src/mcp_agent/app.py b/src/mcp_agent/app.py index a37faca51..ec5576fdf 100644 --- a/src/mcp_agent/app.py +++ b/src/mcp_agent/app.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, Optional, Type, TypeVar, Callable +from typing import Any, Dict, Optional, Type, TypeVar, Callable, Tuple, TYPE_CHECKING from datetime import timedelta import asyncio import sys @@ -14,6 +14,11 @@ from mcp_agent.human_input.types import HumanInputCallback from mcp_agent.human_input.handler import console_input_callback from mcp_agent.workflows.llm.llm_selector import ModelSelector +from mcp_agent.agents.agent import Agent +from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM + +if TYPE_CHECKING: + from mcp_agent.agents.agent_config import AgentConfig R = TypeVar("R") @@ -69,6 +74,9 @@ def __init__( self._model_selector = model_selector self._workflows: Dict[str, Type] = {} # id to workflow class + self._agent_configs: Dict[ + str, "AgentConfig" + ] = {} # name to agent configuration self._logger = None self._context: Optional[Context] = None self._initialized = False @@ -328,3 +336,87 @@ def is_workflow_task(self, func: Callable[..., Any]) -> bool: Check if a function is marked as a workflow task. This gets set for functions that are decorated with @workflow_task.""" return bool(getattr(func, "is_workflow_task", False)) + + @property + def agent_configs(self) -> Dict[str, "AgentConfig"]: + """Get the dictionary of registered agent configurations.""" + return self._agent_configs + + def register_agent_config(self, config: "AgentConfig") -> "AgentConfig": + """ + Register an agent configuration with the application. + + Args: + config: The agent configuration to register + + Returns: + The registered configuration + """ + self._agent_configs[config.name] = config + return config + + async def create_agent(self, name: str) -> Tuple[Agent, Optional[AugmentedLLM]]: + """ + Create an agent from a registered configuration. 
+ + This method can create both basic agents with LLMs and complex workflow patterns, + depending on the agent configuration. + + Args: + name: The name of the registered agent configuration + + Returns: + Tuple of (agent instance, LLM or workflow instance) + + Raises: + ValueError: If no agent configuration with that name exists + """ + + if name not in self._agent_configs: + raise ValueError(f"No agent configuration named '{name}' is registered") + + config = self._agent_configs[name] + + # Create the basic agent + agent = config.create_agent(context=self._context) + await agent.initialize() + + # Determine what kind of workflow/LLM to create + workflow_type = config.get_workflow_type() + + if workflow_type is None and config.llm_config is not None: + # Basic agent with an LLM + llm_instance = await config.llm_config.create_llm() + llm = await agent.attach_llm(lambda: llm_instance) + return agent, llm + + elif workflow_type == "parallel": + # Create a Parallel workflow + from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM + + # Get referenced agents + fan_in_agent, _ = await self.create_agent( + config.parallel_config.fan_in_agent + ) + fan_out_agents = [] + for agent_name in config.parallel_config.fan_out_agents: + fan_out_agent, _ = await self.create_agent(agent_name) + fan_out_agents.append(fan_out_agent) + + # Get LLM factory + llm_factory = config.llm_config.factory if config.llm_config else None + + # Create the workflow + parallel = ParallelLLM( + fan_in_agent=fan_in_agent, + fan_out_agents=fan_out_agents, + llm_factory=llm_factory, + **config.parallel_config.extra_params, + ) + + return agent, parallel + + # Handle other workflow types here... (simplified for now) + + # Default case - just return the agent without an LLM + return agent, None diff --git a/src/mcp_agent/app_server.py b/src/mcp_agent/app_server.py index f013d609b..1208c74ce 100644 --- a/src/mcp_agent/app_server.py +++ b/src/mcp_agent/app_server.py @@ -21,6 +21,7 @@ create_model_from_schema, ) from mcp_agent.agents.agent import Agent +from mcp_agent.agents.agent_config import AgentConfig from mcp_agent.config import MCPServerSettings from mcp_agent.context_dependent import ContextDependent from mcp_agent.executor.workflow import Workflow @@ -45,6 +46,13 @@ def __init__(self, mcp: FastMCP, context=None, **kwargs): for workflow_id, workflow_cls in self.context.app.workflows.items(): self.register_workflow(workflow_id, workflow_cls) + # Register existing agent configurations from the app + for name, config in self.context.app._agent_configs.items(): + logger.info(f"Registered agent config: {name}") + # Use the same tools for agent configs as for agent instances + # When the tools are called, we'll create the agent as needed + create_agent_specific_tools(self.mcp, self, config) + def register_workflow(self, workflow_id: str, workflow_cls: Type[Workflow]): """Register a workflow class.""" if workflow_id not in self.context.app.workflows: @@ -53,16 +61,83 @@ def register_workflow(self, workflow_id: str, workflow_cls: Type[Workflow]): create_workflow_specific_tools(self.mcp, workflow_id, workflow_cls) def register_agent(self, agent: Agent): - """Register an agent instance.""" + """ + Register an agent instance and create tools for it. + This is used for runtime agent instances. 
+ + Args: + agent: The agent instance to register + + Returns: + The registered agent (may be an existing instance if already registered) + """ if agent.name not in self.active_agents: self.active_agents[agent.name] = agent # Create tools for this agent - create_agent_specific_tools(self.mcp, agent.name, agent) + create_agent_specific_tools(self.mcp, self, agent) return agent return self.active_agents[ agent.name ] # Return existing agent if already registered + async def get_or_create_agent(self, name: str) -> Agent: + """ + Get an existing agent or create it from a registered configuration. + Handles creation of both basic agents and workflow-based agents. + + Args: + name: The name of the agent or agent configuration + + Returns: + The agent instance (existing or newly created) + + Raises: + ToolError: If no agent or configuration with that name exists + """ + # Check if the agent is already active + if name in self.active_agents: + return self.active_agents[name] + + # Check if there's a configuration for this agent + if name in self.context.app._agent_configs: + try: + # Use the app's create_agent method which handles workflow setup and LLM attachment + agent, _ = await self.context.app.create_agent(name) + + # Register the agent with the server context + self.register_agent(agent) + + return agent + except Exception as e: + logger.error(f"Error creating agent {name}: {str(e)}") + raise ToolError(f"Failed to create agent {name}: {str(e)}") + + # Neither active nor configured + raise ToolError( + f"Agent not found: {name}. No active agent or configuration with this name exists." + ) + + async def create_agent_from_config(self, name: str) -> Agent: + """ + Create and register an agent from a registered configuration in the MCPApp. + This is a convenience method that delegates to get_or_create_agent. + + Args: + name: The name of the agent configuration + + Returns: + The created and initialized agent + + Raises: + ValueError: If no agent configuration with that name exists + """ + # Validate that this is actually a configuration, not an instance + if name not in self.context.app._agent_configs: + raise ValueError(f"No agent configuration named '{name}' is registered") + + # Use the common get_or_create_agent method + return await self.get_or_create_agent(name) + def create_mcp_server_for_app(app: MCPApp) -> FastMCP: """ @@ -137,12 +212,17 @@ def list_agents(ctx: MCPContext) -> Dict[str, Dict[str, Any]]: List all available agents with their detailed information. Returns information about each agent including their name, instruction, - and the MCP servers they have access to. This helps with understanding - what each agent is designed to do before calling it. + the MCP servers they have access to, and if it uses a workflow pattern. + This helps with understanding what each agent is designed to do before calling it. + + The list includes both active agent instances and agent configurations + that can be instantiated on-demand. 
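+
+        Example (illustrative) entry in the returned dictionary:
+            "research_team": {"type": "config", "workflow_type": "parallel", ...}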
""" server_context: ServerContext = ctx.request_context.lifespan_context server_registry = server_context.context.server_registry result = {} + + # Add active agents for name, agent in server_context.active_agents.items(): # Format instruction - handle callable instructions instruction = agent.instruction @@ -162,8 +242,70 @@ def list_agents(ctx: MCPContext) -> Dict[str, Dict[str, Any]]: f"agents/{name}/generate_str", f"agents/{name}/generate_structured", ], + "type": "instance", } + # Add agent configurations from the app (if not already active) + for name, config in server_context.context.app._agent_configs.items(): + if name not in result: # Skip if already added as active agent + # Format instruction - handle callable instructions + instruction = config.instruction + if callable(instruction): + instruction = instruction({}) + + servers = _get_server_descriptions(server_registry, config.server_names) + + # Get workflow type and additional info + workflow_type = config.get_workflow_type() + workflow_config = config.get_workflow_config() + + # Build detailed agent info + agent_info = { + "name": name, + "instruction": instruction, + "servers": servers, + "capabilities": ["generate", "generate_str", "generate_structured"], + "tool_endpoints": [ + f"agents/{name}/generate", + f"agents/{name}/generate_str", + f"agents/{name}/generate_structured", + ], + "type": "config", + } + + # Add workflow information if present + if workflow_type: + agent_info["workflow_type"] = workflow_type + if workflow_config: + # Include key details from the workflow config + # but exclude sensitive or callable fields + try: + config_dict = workflow_config.model_dump() + # Remove any callable fields + config_dict = { + k: v + for k, v in config_dict.items() + if not callable(v) and k != "extra_params" + } + agent_info["workflow_config"] = config_dict + except Exception as e: + logger.warning("Error getting workflow info: %s", str(e)) + + # Add LLM configuration if present + if config.llm_config: + try: + llm_info = { + "type": config.llm_config.factory.__name__ + if hasattr(config.llm_config.factory, "__name__") + else "custom", + "model": config.llm_config.model, + } + agent_info["llm_config"] = llm_info + except Exception as e: + logger.warning("Error getting LLM info: %s", str(e)) + + result[name] = agent_info + return result @mcp.tool(name="agents/create") @@ -461,22 +603,42 @@ async def run_workflow( if workflow_name not in app.workflows: raise ValueError(f"Workflow '{workflow_name}' not found.") - # Create a workflow instance + # Get the workflow class workflow_cls = app.workflows[workflow_name] - workflow = workflow_cls(executor=app.executor, name=workflow_name) - # Generate a unique ID for this workflow instance - workflow_id = str(uuid.uuid4()) + # Create and initialize the workflow instance using the factory method + try: + # Separate constructor args from run args if provided + run_args = args or {} + constructor_args = ( + run_args.pop("constructor_args", {}) + if isinstance(run_args, dict) + else {} + ) + + # Create workflow instance + workflow = await workflow_cls.create( + executor=app.executor, name=workflow_name, **constructor_args + ) + + # Generate a unique ID for this workflow instance + workflow_id = str(uuid.uuid4()) - # Store the workflow instance - server_config.active_workflows[workflow_id] = workflow + # Store the workflow instance + server_config.active_workflows[workflow_id] = workflow - # Run the workflow in a separate task - args = args or {} - run_task = 
asyncio.create_task(workflow.run(**args)) + # Run the workflow in a separate task with cleanup handling + run_task = asyncio.create_task( + _run_workflow_and_cleanup( + workflow, run_args, workflow_id, server_config + ) + ) - # Store the task to check status later - server_config.active_workflows[workflow_id + "_task"] = run_task + # Store the task to check status later + server_config.active_workflows[workflow_id + "_task"] = run_task + except Exception as e: + logger.error(f"Error creating workflow {workflow_name}: {str(e)}") + raise ValueError(f"Error creating workflow: {str(e)}") # Return information about the workflow return { @@ -793,26 +955,46 @@ def create_agent_tools(mcp: FastMCP, server_context: ServerContext): def create_agent_specific_tools( - mcp: FastMCP, server_context: ServerContext, agent: Agent + mcp: FastMCP, server_context: ServerContext, agent_or_config: Agent | AgentConfig ): - """Create specific tools for a given agent.""" + """ + Create specific tools for a given agent instance or configuration. + + Args: + mcp: The FastMCP server + server_context: The server context + agent_or_config: Either an Agent instance or an AgentConfig + """ + # Extract common properties based on whether we have an Agent or AgentConfig + if isinstance(agent_or_config, Agent): + name = agent_or_config.name + instruction = agent_or_config.instruction + server_names = agent_or_config.server_names + workflow_type = None + else: # AgentConfig + name = agent_or_config.name + instruction = agent_or_config.instruction + server_names = agent_or_config.server_names + workflow_type = agent_or_config.get_workflow_type() # Format instruction - handle callable instructions - instruction = agent.instruction if callable(instruction): instruction = instruction({}) server_registry = server_context.context.server_registry + # Add workflow info to description if present + workflow_info = f" using {workflow_type} workflow" if workflow_type else "" + # Add generate* tools for this agent @mcp.tool( - name=f"agents/{agent.name}/generate", + name=f"agents/{name}/generate", description=f""" - Run the '{agent.name}' agent using the given message. + Run the '{name}' agent{workflow_info} using the given message. This is similar to generating an LLM completion. Agent Description: {instruction} - Connected Servers: {_get_server_descriptions_as_string(server_registry, agent.server_names)} + Connected Servers: {_get_server_descriptions_as_string(server_registry, server_names)} Args: message: The prompt to send to the agent. @@ -827,17 +1009,17 @@ async def generate( message: str | MCPMessageParam | List[MCPMessageParam], request_params: RequestParams | None = None, ) -> List[MCPMessageResult]: - return await _agent_generate(ctx, agent.name, message, request_params) + return await _agent_generate(ctx, name, message, request_params) @mcp.tool( - name=f"agents/{agent.name}/generate_str", + name=f"agents/{name}/generate_str", description=f""" - Run the '{agent.name}' agent using the given message and return the response as a string. - Use agents/{agent.name}/generate for results in the original format, and - use agents/{agent.name}/generate_structured for results conforming to a specific schema. + Run the '{name}' agent{workflow_info} using the given message and return the response as a string. + Use agents/{name}/generate for results in the original format, and + use agents/{name}/generate_structured for results conforming to a specific schema. 
        Agent Description: {instruction}
-       Connected Servers: {_get_server_descriptions_as_string(server_registry, agent.server_names)}
+       Connected Servers: {_get_server_descriptions_as_string(server_registry, server_names)}

        Args:
            message: The prompt to send to the agent.
@@ -852,19 +1034,19 @@ async def generate_str(
         message: str | MCPMessageParam | List[MCPMessageParam],
         request_params: RequestParams | None = None,
     ) -> str:
-        return await _agent_generate_str(ctx, agent.name, message, request_params)
+        return await _agent_generate_str(ctx, name, message, request_params)

     # Add structured generation tool for this agent
     @mcp.tool(
-        name=f"agents/{agent.name}/generate_structured",
+        name=f"agents/{name}/generate_structured",
         description=f"""
-        Run the '{agent.name}' agent using the given message and return a response that matches the given schema.
+        Run the '{name}' agent{workflow_info} using the given message and return a response that matches the given schema.

-        Use agents/{agent.name}/generate for results in the original format, and
-        use agents/{agent.name}/generate_str for string result.
+        Use agents/{name}/generate for results in the original format, and
+        use agents/{name}/generate_str for a string result.

        Agent Description: {instruction}
-       Connected Servers: {_get_server_descriptions_as_string(server_registry, agent.server_names)}
+       Connected Servers: {_get_server_descriptions_as_string(server_registry, server_names)}

        Args:
            message: The prompt to send to the agent.
@@ -912,7 +1094,7 @@ async def generate_structured(
         request_params: RequestParams | None = None,
     ) -> Dict[str, Any]:
         return await _agent_generate_structured(
-            ctx, agent.name, message, response_schema, request_params
+            ctx, name, message, response_schema, request_params
         )


@@ -970,21 +1152,39 @@ async def workflow_specific_run(
        if workflow_id not in app.workflows:
            raise ValueError(f"Workflow '{workflow_id}' not found.")

-        # Create workflow instance
-        workflow = workflow_cls(executor=app.executor, name=workflow_id)
+        # Create workflow instance using the factory method
+        try:
+            # Separate constructor args from run args if provided
+            run_args = args or {}
+            constructor_args = (
+                run_args.pop("constructor_args", {})
+                if isinstance(run_args, dict)
+                else {}
+            )

-        # Generate workflow instance ID
-        instance_id = str(uuid.uuid4())
+            # Create and initialize workflow
+            workflow = await workflow_cls.create(
+                executor=app.executor, name=workflow_id, **constructor_args
+            )

-        # Store workflow instance
-        server_config.active_workflows[instance_id] = workflow
+            # Generate workflow instance ID
+            instance_id = str(uuid.uuid4())

-        # Run workflow in separate task
-        run_args = args or {}
-        run_task = asyncio.create_task(workflow.run(**run_args))
+            # Store workflow instance
+            server_config.active_workflows[instance_id] = workflow

-        # Store task
-        server_config.active_workflows[instance_id + "_task"] = run_task
+            # Run workflow in separate task with cleanup handling
+            run_task = asyncio.create_task(
+                _run_workflow_and_cleanup(
+                    workflow, run_args, instance_id, server_config
+                )
+            )
+
+            # Store task
+            server_config.active_workflows[instance_id + "_task"] = run_task
+        except Exception as e:
+            logger.error(f"Error creating workflow {workflow_id}: {str(e)}")
+            raise ValueError(f"Error creating workflow: {str(e)}")

        # Return information about the workflow
        return {
@@ -1307,13 +1507,14 @@ async def _agent_generate(
     """
     server_context: ServerContext = ctx.request_context.lifespan_context

-    if agent_name not in server_context.active_agents:
-        raise
ToolError(f"Agent not found: {agent_name}. Make sure the agent ") + # Get or create the agent - this will automatically create agent from config if needed + try: + agent = await server_context.get_or_create_agent(agent_name) + except ToolError as e: + raise e - agent = server_context.active_agents[agent_name] - if not agent: - raise ToolError(f"Agent not found: {agent_name}") - elif not agent.llm: + # Check if the agent has an LLM attached + if not agent.llm: raise ToolError( f"Agent {agent_name} does not have an LLM attached. Make sure to call the attach_llm method where the agent is created." ) @@ -1327,7 +1528,7 @@ async def _agent_generate( else: input_message = agent.llm.from_mcp_message_param(message) - # Check if the agent is already initialized + # Use the agent as a context manager to ensure proper initialization/cleanup async with agent: result = await agent.llm.generate( message=input_message, request_params=request_params @@ -1356,13 +1557,14 @@ async def _agent_generate_str( """ server_context: ServerContext = ctx.request_context.lifespan_context - if agent_name not in server_context.active_agents: - raise ToolError(f"Agent not found: {agent_name}. Make sure the agent ") + # Get or create the agent - this will automatically create agent from config if needed + try: + agent = await server_context.get_or_create_agent(agent_name) + except ToolError as e: + raise e - agent = server_context.active_agents[agent_name] - if not agent: - raise ToolError(f"Agent not found: {agent_name}") - elif not agent.llm: + # Check if the agent has an LLM attached + if not agent.llm: raise ToolError( f"Agent {agent_name} does not have an LLM attached. Make sure to call the attach_llm method where the agent is created." ) @@ -1376,7 +1578,7 @@ async def _agent_generate_str( else: input_message = agent.llm.from_mcp_message_param(message) - # Check if the agent is already initialized + # Use the agent as a context manager to ensure proper initialization/cleanup async with agent: result = await agent.llm.generate_str( message=input_message, request_params=request_params @@ -1435,13 +1637,14 @@ async def _agent_generate_structured( """ server_context: ServerContext = ctx.request_context.lifespan_context - if agent_name not in server_context.active_agents: - raise ToolError(f"Agent not found: {agent_name}. Make sure the agent ") + # Get or create the agent - this will automatically create agent from config if needed + try: + agent = await server_context.get_or_create_agent(agent_name) + except ToolError as e: + raise e - agent = server_context.active_agents[agent_name] - if not agent: - raise ToolError(f"Agent not found: {agent_name}") - elif not agent.llm: + # Check if the agent has an LLM attached + if not agent.llm: raise ToolError( f"Agent {agent_name} does not have an LLM attached. Make sure to call the attach_llm method where the agent is created." 
) @@ -1455,9 +1658,10 @@ async def _agent_generate_structured( else: input_message = agent.llm.from_mcp_message_param(message) + # Create a Pydantic model from the schema response_model = create_model_from_schema(response_schema) - # Check if the agent is already initialized + # Use the agent as a context manager to ensure proper initialization/cleanup async with agent: result = await agent.llm.generate_structured( message=input_message, @@ -1466,3 +1670,35 @@ async def _agent_generate_structured( ) # Convert to dictionary for JSON serialization return result.model_dump(mode="json") + + +async def _run_workflow_and_cleanup(workflow, run_args, instance_id, server_context): + """ + Run a workflow and ensure proper cleanup regardless of outcome. + + Args: + workflow: The workflow instance to run + run_args: Arguments to pass to the workflow's run method + instance_id: The unique ID for this workflow instance + server_context: The server context for managing active workflows + + Returns: + The result from the workflow's run method + """ + try: + # Run the workflow + result = await workflow.run(**run_args) + return result + except Exception as e: + # Log and propagate exceptions + logger.error(f"Error in workflow {workflow.name} (ID: {instance_id}): {str(e)}") + raise + finally: + try: + # Always attempt to clean up the workflow + await workflow.cleanup() + except Exception as cleanup_error: + # Log but don't fail if cleanup fails + logger.error( + f"Error cleaning up workflow {workflow.name} (ID: {instance_id}): {str(cleanup_error)}" + ) diff --git a/src/mcp_agent/executor/workflow.py b/src/mcp_agent/executor/workflow.py index 0010841a3..166da38dc 100644 --- a/src/mcp_agent/executor/workflow.py +++ b/src/mcp_agent/executor/workflow.py @@ -72,12 +72,35 @@ def __init__( self.name = name or self.__class__.__name__ self.init_kwargs = kwargs self._logger = get_logger(f"workflow.{self.name}") + self._initialized = False # A simple workflow state object # If under Temporal, storing it as a field on this class # means it can be replayed automatically self.state = WorkflowState(metadata=metadata or {}) + @classmethod + async def create( + cls, executor: Executor, name: str | None = None, **kwargs: Any + ) -> "Workflow": + """ + Factory method to create and initialize a workflow instance. + + This default implementation creates a workflow instance and calls initialize(). + Subclasses can override this method for custom initialization logic. + + Args: + executor: The executor to use for this workflow + name: Optional name for the workflow (defaults to class name) + **kwargs: Additional parameters to pass to the workflow constructor + + Returns: + An initialized workflow instance + """ + workflow = cls(executor=executor, name=name, **kwargs) + await workflow.initialize() + return workflow + @abstractmethod async def run(self, *args: Any, **kwargs: Any) -> "WorkflowResult[T]": """ @@ -112,18 +135,37 @@ async def wait_for_input(self, description: str = "Provide input") -> str: async def initialize(self): """ - Optional initialization method that will be called before run. + Initialization method that will be called before run. Override this to set up any resources needed by the workflow. + + This checks the _initialized flag to prevent double initialization. 
""" + if self._initialized: + self._logger.debug(f"Workflow {self.name} already initialized, skipping") + return + self.state.status = "initializing" self._logger.debug(f"Initializing workflow {self.name}") + self._initialized = True + self.state.updated_at = datetime.utcnow().timestamp() async def cleanup(self): """ - Optional cleanup method that will be called after run. + Cleanup method that will be called after run. Override this to clean up any resources used by the workflow. + + This checks the _initialized flag to ensure cleanup is only done on initialized workflows. """ + if not self._initialized: + self._logger.debug( + f"Workflow {self.name} not initialized, skipping cleanup" + ) + return + self._logger.debug(f"Cleaning up workflow {self.name}") + self._initialized = False + self.state.status = "cleaned_up" + self.state.updated_at = datetime.utcnow().timestamp() async def __aenter__(self): """Support for async context manager pattern.""" diff --git a/src/mcp_agent/fast_app.py b/src/mcp_agent/fast_app.py new file mode 100644 index 000000000..368cf363f --- /dev/null +++ b/src/mcp_agent/fast_app.py @@ -0,0 +1,532 @@ +""" +FastMCPApp - Extended MCPApp with declarative agent and workflow configuration. +""" + +from typing import Callable, List, Optional, Tuple + +from mcp_agent.app import MCPApp +from mcp_agent.agents.agent import Agent +from mcp_agent.agents.agent_config import ( + AgentConfig, + AugmentedLLMConfig, + ParallelWorkflowConfig, + OrchestratorWorkflowConfig, + RouterWorkflowConfig, + EvaluatorOptimizerWorkflowConfig, + SwarmWorkflowConfig, +) +from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM + + +class FastMCPApp(MCPApp): + """ + Extension of MCPApp with declarative agent configuration and workflow patterns. + Provides decorators for easily defining agents and workflows. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def agent( + self, + name: str, + instruction: str, + server_names: List[str] = None, + llm_factory: Callable = None, + **kwargs, + ): + """ + Decorator to define a basic agent. + + Example: + @app.agent("finder", "You find information", ["fetch", "filesystem"]) + def finder_config(config): + config.llm_config = AugmentedLLMConfig(factory=OpenAIAugmentedLLM) + return config + + Args: + name: The name of the agent + instruction: The agent's instruction/system prompt + server_names: List of MCP servers the agent can access + llm_factory: Optional LLM factory to initialize the agent with + **kwargs: Additional parameters for the agent + + Returns: + Decorator function that registers the agent configuration + """ + + def decorator(config_fn): + # Create basic config + config = AgentConfig( + name=name, instruction=instruction, server_names=server_names or [] + ) + + # Add LLM config if provided + if llm_factory: + config.llm_config = AugmentedLLMConfig(factory=llm_factory) + + # Apply any extra configuration from the function + config = config_fn(config) + + # Register the configuration + self._agent_configs[name] = config + return config + + return decorator + + def parallel( + self, + name: str, + instruction: str, + fan_in: str, + fan_out: List[str], + llm_factory: Callable = None, + **kwargs, + ): + """ + Decorator to define a parallel workflow agent. 
+ + Example: + @app.parallel("researcher", "Research team", + fan_in="aggregator", fan_out=["finder", "writer"]) + def researcher_config(config): + config.parallel_config.concurrent = True + return config + + Args: + name: The name of the workflow agent + instruction: The agent's instruction/system prompt + fan_in: Name of the agent to use for aggregating results + fan_out: List of agent names to distribute work to + llm_factory: Optional LLM factory for the workflow + **kwargs: Additional parameters + + Returns: + Decorator function that registers the parallel workflow configuration + """ + + def decorator(config_fn): + # Create basic config with parallel workflow + config = AgentConfig( + name=name, + instruction=instruction, + parallel_config=ParallelWorkflowConfig( + fan_in_agent=fan_in, fan_out_agents=fan_out + ), + ) + + # Add LLM config if provided + if llm_factory: + config.llm_config = AugmentedLLMConfig(factory=llm_factory) + + # Apply any extra configuration + config = config_fn(config) + + # Register the configuration + self._agent_configs[name] = config + return config + + return decorator + + def orchestrator( + self, + name: str, + instruction: str, + available_agents: List[str], + llm_factory: Callable = None, + **kwargs, + ): + """ + Decorator to define an orchestrator workflow agent. + + Example: + @app.orchestrator("manager", "Project manager", + available_agents=["finder", "writer", "analyst"]) + def manager_config(config): + config.orchestrator_config.max_iterations = 5 + return config + + Args: + name: The name of the workflow agent + instruction: The agent's instruction/system prompt + available_agents: List of agent names the orchestrator can use + llm_factory: Optional LLM factory for the workflow + **kwargs: Additional parameters + + Returns: + Decorator function that registers the orchestrator workflow configuration + """ + + def decorator(config_fn): + config = AgentConfig( + name=name, + instruction=instruction, + orchestrator_config=OrchestratorWorkflowConfig( + available_agents=available_agents + ), + ) + + if llm_factory: + config.llm_config = AugmentedLLMConfig(factory=llm_factory) + + config = config_fn(config) + self._agent_configs[name] = config + return config + + return decorator + + def router( + self, + name: str, + instruction: str, + agent_names: List[str], + router_type: str = "llm", + llm_factory: Callable = None, + **kwargs, + ): + """ + Decorator to define a router workflow agent. 
+ + Example: + @app.router("dispatcher", "Routes tasks to specialists", + agent_names=["finder", "writer", "coder"]) + def dispatcher_config(config): + config.router_config.top_k = 2 + return config + + Args: + name: The name of the workflow agent + instruction: The agent's instruction/system prompt + agent_names: List of agent names the router can dispatch to + router_type: Type of router ("llm" or "embedding") + llm_factory: Optional LLM factory for the workflow + **kwargs: Additional parameters + + Returns: + Decorator function that registers the router workflow configuration + """ + + def decorator(config_fn): + config = AgentConfig( + name=name, + instruction=instruction, + router_config=RouterWorkflowConfig( + agent_names=agent_names, router_type=router_type + ), + ) + + if llm_factory: + config.llm_config = AugmentedLLMConfig(factory=llm_factory) + + config = config_fn(config) + self._agent_configs[name] = config + return config + + return decorator + + def evaluator_optimizer( + self, + name: str, + instruction: str, + evaluator: str, + optimizer: str, + llm_factory: Callable = None, + **kwargs, + ): + """ + Decorator to define an evaluator-optimizer workflow agent. + + Example: + @app.evaluator_optimizer("quality_team", "Ensures high quality output", + evaluator="critic", optimizer="writer") + def quality_team_config(config): + config.evaluator_optimizer_config.min_rating = "good" + return config + + Args: + name: The name of the workflow agent + instruction: The agent's instruction/system prompt + evaluator: Name of the agent to use as evaluator + optimizer: Name of the agent to use as optimizer + llm_factory: Optional LLM factory for the workflow + **kwargs: Additional parameters + + Returns: + Decorator function that registers the evaluator-optimizer workflow configuration + """ + + def decorator(config_fn): + config = AgentConfig( + name=name, + instruction=instruction, + evaluator_optimizer_config=EvaluatorOptimizerWorkflowConfig( + evaluator_agent=evaluator, optimizer_agent=optimizer + ), + ) + + if llm_factory: + config.llm_config = AugmentedLLMConfig(factory=llm_factory) + + config = config_fn(config) + self._agent_configs[name] = config + return config + + return decorator + + def swarm( + self, + name: str, + instruction: str, + agents: List[str], + llm_factory: Callable = None, + **kwargs, + ): + """ + Decorator to define a swarm workflow agent. + + Example: + @app.swarm("team", "A collaborative team of agents", + agents=["leader", "researcher", "writer"]) + def team_config(config): + config.swarm_config.context_variables = {"priority": "accuracy"} + return config + + Args: + name: The name of the workflow agent + instruction: The agent's instruction/system prompt + agents: List of agent names in the swarm + llm_factory: Optional LLM factory for the workflow + **kwargs: Additional parameters + + Returns: + Decorator function that registers the swarm workflow configuration + """ + + def decorator(config_fn): + config = AgentConfig( + name=name, + instruction=instruction, + swarm_config=SwarmWorkflowConfig(agents=agents), + ) + + if llm_factory: + config.llm_config = AugmentedLLMConfig(factory=llm_factory) + + config = config_fn(config) + self._agent_configs[name] = config + return config + + return decorator + + async def create_agent(self, name: str) -> Tuple[Agent, Optional[AugmentedLLM]]: + """ + Create an agent with its configured workflow. 
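Putting the decorators together, registering a configuration and later resolving it might look like this sketch; the app and agent names are hypothetical, and `OpenAIAugmentedLLM` stands in for any factory.

```python
from mcp_agent.fast_app import FastMCPApp
from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM

app = FastMCPApp(name="assistant_app")

@app.agent("finder", "You find information.", server_names=["fetch"],
           llm_factory=OpenAIAugmentedLLM)
def finder_config(config):
    # Further tweaks (e.g. config.llm_config parameters) could go here.
    return config

# Resolving the registered configuration into a live agent:
#   agent, llm = await app.create_agent("finder")
#   answer = await llm.generate_str("What is the Model Context Protocol?")
```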
+ + Args: + name: The name of the registered agent configuration + + Returns: + Tuple of (agent instance, augmented LLM or workflow instance) + + Raises: + ValueError: If no agent configuration with the given name exists + """ + if name not in self._agent_configs: + raise ValueError(f"No agent configuration named '{name}' is registered") + + config = self._agent_configs[name] + + # Create and initialize the basic agent + agent = config.create_agent(context=self._context) + await agent.initialize() + + # Handle different workflow types with type-safe configs + workflow_type = config.get_workflow_type() + + if workflow_type is None and config.llm_config is not None: + # Basic agent with simple LLM + llm_instance = await config.llm_config.create_llm() + llm = await agent.attach_llm(lambda: llm_instance) + return agent, llm + + elif workflow_type == "parallel": + # Create a Parallel workflow with type-safe config + parallel_config = config.parallel_config + from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM + + # Get referenced agents + fan_in_agent, _ = await self.create_agent(parallel_config.fan_in_agent) + fan_out_agents = [] + for agent_name in parallel_config.fan_out_agents: + fan_out_agent, _ = await self.create_agent(agent_name) + fan_out_agents.append(fan_out_agent) + + # Get LLM factory + llm_factory = config.llm_config.factory if config.llm_config else None + + # Create parallel workflow + parallel = ParallelLLM( + fan_in_agent=fan_in_agent, + fan_out_agents=fan_out_agents, + llm_factory=llm_factory, + concurrent=parallel_config.concurrent, + synchronize_fan_out_models=parallel_config.synchronize_fan_out_models, + **parallel_config.extra_params, + ) + + return agent, parallel + + elif workflow_type == "orchestrator": + # Create an Orchestrator workflow with type-safe config + orchestrator_config = config.orchestrator_config + from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator + + # Get referenced agents + available_agents = [] + for agent_name in orchestrator_config.available_agents: + available_agent, _ = await self.create_agent(agent_name) + available_agents.append(available_agent) + + # Get the LLM factory + llm_factory = config.llm_config.factory if config.llm_config else None + + # Optional planner agent + planner = None + if orchestrator_config.planner_agent: + planner, _ = await self.create_agent(orchestrator_config.planner_agent) + + # Create the orchestrator + orchestrator = Orchestrator( + llm_factory=llm_factory, + available_agents=available_agents, + planner=planner, + max_iterations=orchestrator_config.max_iterations, + **orchestrator_config.extra_params, + ) + + return agent, orchestrator + + elif workflow_type == "router": + # Create a Router workflow with type-safe config + router_config = config.router_config + + # Get referenced agents + agents = [] + for agent_name in router_config.agent_names: + agent_inst, _ = await self.create_agent(agent_name) + agents.append(agent_inst) + + # Determine which router implementation to use + if router_config.router_type == "llm": + from mcp_agent.workflows.router.router_llm import LLMRouter + + # Get LLM factory + llm_factory = config.llm_config.factory if config.llm_config else None + llm_instance = None + if llm_factory: + llm_instance = await config.llm_config.create_llm() + + # Create the router + router = LLMRouter( + llm=llm_instance, agents=agents, **router_config.extra_params + ) + + else: # embedding router + # Create the router (implementation depends on embedding model) + if 
router_config.embedding_model == "cohere":
+                    from mcp_agent.workflows.router.router_embedding_cohere import (
+                        CohereEmbeddingRouter,
+                    )
+
+                    router = CohereEmbeddingRouter(
+                        agents=agents, **router_config.extra_params
+                    )
+                else:
+                    from mcp_agent.workflows.router.router_embedding_openai import (
+                        OpenAIEmbeddingRouter,
+                    )
+
+                    router = OpenAIEmbeddingRouter(
+                        agents=agents, **router_config.extra_params
+                    )
+
+            return agent, router
+
+        elif workflow_type == "evaluator_optimizer":
+            # Create an Evaluator-Optimizer workflow with type-safe config
+            eo_config = config.evaluator_optimizer_config
+            from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
+                EvaluatorOptimizerLLM,
+                QualityRating,
+            )
+
+            # Get referenced agents
+            evaluator_agent, _ = await self.create_agent(eo_config.evaluator_agent)
+            optimizer_agent, _ = await self.create_agent(eo_config.optimizer_agent)
+
+            # Get LLM factory
+            llm_factory = config.llm_config.factory if config.llm_config else None
+
+            # Parse min_rating string to enum
+            min_rating = QualityRating.GOOD  # Default
+            try:
+                min_rating = QualityRating[eo_config.min_rating.upper()]
+            except (KeyError, AttributeError):
+                pass
+
+            # Create the evaluator-optimizer
+            eo = EvaluatorOptimizerLLM(
+                evaluator=evaluator_agent,
+                optimizer=optimizer_agent,
+                llm_factory=llm_factory,
+                min_rating=min_rating,
+                max_iterations=eo_config.max_iterations,
+                **eo_config.extra_params,
+            )
+
+            return agent, eo
+
+        elif workflow_type == "swarm":
+            # Create a Swarm workflow with type-safe config
+            swarm_config = config.swarm_config
+
+            # Choose the swarm implementation based on LLM factory
+            llm_factory = config.llm_config.factory if config.llm_config else None
+
+            if not llm_factory:
+                raise ValueError("An LLM factory is required for the Swarm workflow")
+
+            # Get the factory class name to determine which Swarm implementation to use
+            factory_class_name = llm_factory.__name__
+
+            if "Anthropic" in factory_class_name:
+                from mcp_agent.workflows.swarm.swarm_anthropic import AnthropicSwarm
+
+                # Get the primary agent
+                primary_agent, _ = await self.create_agent(swarm_config.agents[0])
+
+                # Create the swarm
+                swarm = AnthropicSwarm(
+                    agent=primary_agent,
+                    context_variables=swarm_config.context_variables,
+                    **swarm_config.extra_params,
+                )
+            else:
+                # Default to OpenAI swarm
+                from mcp_agent.workflows.swarm.swarm_openai import OpenAISwarm
+
+                # Get the primary agent
+                primary_agent, _ = await self.create_agent(swarm_config.agents[0])
+
+                # Create the swarm
+                swarm = OpenAISwarm(
+                    agent=primary_agent,
+                    context_variables=swarm_config.context_variables,
+                    **swarm_config.extra_params,
+                )
+
+            return agent, swarm
+
+        else:
+            # No workflow or LLM config, just return the basic agent
+            return agent, None

From 1e3521832e53f8ab55a017b017687fc6677f9824 Mon Sep 17 00:00:00 2001
From: Sarmad Qadri
Date: Thu, 3 Apr 2025 09:46:41 -0400
Subject: [PATCH 3/9] WIP update app server

---
 examples/workflow_mcp_server/server.py |   18 +-
 src/mcp_agent/agents/agent.py          |   14 +-
 src/mcp_agent/agents/agent_config.py   |  203 +++-
 src/mcp_agent/app.py                   |   79 +-
 src/mcp_agent/app_server.py            | 1253 +++++------------------
 src/mcp_agent/context.py               |    8 +
 src/mcp_agent/executor/executor.py     |   52 +-
 src/mcp_agent/executor/workflow.py     |  588 ++++++++---
 src/mcp_agent/fast_app.py              |  279 +-----
 9 files changed, 986 insertions(+), 1508 deletions(-)

diff --git a/examples/workflow_mcp_server/server.py b/examples/workflow_mcp_server/server.py
index 5bfdfae4a..475a04a24 100644
--- a/examples/workflow_mcp_server/server.py
+++ b/examples/workflow_mcp_server/server.py @@ -10,7 +10,10 @@ import asyncio import os import logging -from typing import Dict, Any, Optional +from typing import Dict, Any, Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from mcp_agent.context import Context from mcp_agent.fast_app import FastMCPApp from mcp_agent.app_server import create_mcp_server_for_app @@ -18,12 +21,11 @@ from mcp_agent.agents.agent_config import ( AgentConfig, AugmentedLLMConfig, - ParallelWorkflowConfig, + ParallelLLMConfig, ) from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM from mcp_agent.executor.workflow import Workflow, WorkflowResult -from mcp_agent.executor.executor import Executor # Initialize logging logging.basicConfig(level=logging.INFO) @@ -38,22 +40,22 @@ class DataProcessorWorkflow(Workflow[str]): @classmethod async def create( - cls, executor: Executor, name: str | None = None, **kwargs: Any + cls, name: str | None = None, context: Optional["Context"] = None, **kwargs: Any ) -> "DataProcessorWorkflow": """ Factory method to create and initialize the DataProcessorWorkflow. Demonstrates how to override the default create method for specialized initialization. Args: - executor: The executor to use name: Optional workflow name + context: Optional context to use (will use global context if not provided) **kwargs: Additional parameters for customization Returns: An initialized DataProcessorWorkflow instance """ # Create the workflow instance - workflow = cls(executor=executor, name=name or "data_processor", **kwargs) + workflow = cls(name=name or "data_processor", context=context, **kwargs) # Initialize it (which will set up agents, etc.) await workflow.initialize() @@ -344,7 +346,7 @@ class SummarizationWorkflowRegistered(SummarizationWorkflow): llm_config=AugmentedLLMConfig( factory=AnthropicAugmentedLLM, model="claude-3-opus-20240229" ), - parallel_config=ParallelWorkflowConfig( + parallel_config=ParallelLLMConfig( fan_in_agent="programmatic_editor", fan_out_agents=["programmatic_summarizer", "programmatic_fact_checker"], concurrent=True, @@ -437,7 +439,7 @@ async def main(): logger.info("Registered agent configurations:") for name, config in app.agent_configs.items(): - workflow_type = config.get_workflow_type() or "basic" + workflow_type = config.get_agent_type() or "basic" logger.info(f" - {name} ({workflow_type})") # Create the MCP server that exposes both workflows and agent configurations diff --git a/src/mcp_agent/agents/agent.py b/src/mcp_agent/agents/agent.py index b7e94c78c..73b683c67 100644 --- a/src/mcp_agent/agents/agent.py +++ b/src/mcp_agent/agents/agent.py @@ -84,7 +84,9 @@ async def initialize(self): tool: FastTool = FastTool.from_function(function) self._function_tool_map[tool.name] = tool - async def attach_llm(self, llm_factory: Callable[..., LLM]) -> LLM: + async def attach_llm( + self, llm_factory: Callable[..., LLM] | None = None, llm: LLM | None = None + ) -> LLM: """ Create an LLM instance for the agent. @@ -92,11 +94,19 @@ async def attach_llm(self, llm_factory: Callable[..., LLM]) -> LLM: llm_factory: A callable that constructs an AugmentedLLM or its subclass. The factory should accept keyword arguments matching the AugmentedLLM constructor parameters. + llm: An instance of AugmentedLLM or its subclass. If provided, this will be used + instead of creating a new instance. Returns: An instance of AugmentedLLM or one of its subclasses. 
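Given the new `llm=` parameter on `attach_llm`, both call styles below should be possible; the function and variable names are illustrative only.

```python
from mcp_agent.agents.agent import Agent
from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM

async def wire_up(agent: Agent, prebuilt_workflow=None) -> None:
    # Style 1: pass a factory; the agent builds an LLM bound to itself.
    await agent.attach_llm(llm_factory=OpenAIAugmentedLLM)

    # Style 2: pass a pre-built instance, e.g. a ParallelLLM workflow;
    # this is how create_agent() attaches composite workflows.
    if prebuilt_workflow is not None:
        await agent.attach_llm(llm=prebuilt_workflow)
```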
""" - self.llm = llm_factory(agent=self) + if llm: + self.llm = llm + elif llm_factory: + self.llm = llm_factory(agent=self) + else: + raise ValueError("Either llm_factory or llm must be provided") + return self.llm async def shutdown(self): diff --git a/src/mcp_agent/agents/agent_config.py b/src/mcp_agent/agents/agent_config.py index dd5d20fb3..cecfececa 100644 --- a/src/mcp_agent/agents/agent_config.py +++ b/src/mcp_agent/agents/agent_config.py @@ -2,19 +2,34 @@ Agent configuration classes for declarative agent definition. """ -from typing import TYPE_CHECKING +from typing import ( + Any, + Callable, + Dict, + List, + Literal, + Optional, + TypeVar, + Union, + Generic, + TYPE_CHECKING, +) from pydantic import BaseModel, Field, ConfigDict -from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union, Generic from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM, RequestParams from mcp_agent.human_input.types import HumanInputCallback if TYPE_CHECKING: from mcp_agent.agents.agent import Agent + from mcp_agent.context import Context # Define TypeVar for LLM types LLM = TypeVar("LLM", bound=AugmentedLLM) +# region AugmentedLLM configs + +# TODO: saqadri - Use these in the constructors for the respective classes + class AugmentedLLMConfig(BaseModel, Generic[LLM]): """ @@ -25,18 +40,13 @@ class AugmentedLLMConfig(BaseModel, Generic[LLM]): # The factory function or class that creates the LLM factory: Callable[..., LLM] - # Common parameters that apply to most LLMs - model: Optional[str] = None - temperature: Optional[float] = None - max_tokens: Optional[int] = None - # Model-specific parameters provider_params: Dict[str, Any] = Field(default_factory=dict) # Request parameters used in generate calls default_request_params: Optional[RequestParams] = None - model_config = ConfigDict(arbitrary_types_allowed=True) + model_config = ConfigDict(extra=True, arbitrary_types_allowed=True) async def create_llm(self) -> LLM: """ @@ -45,14 +55,9 @@ async def create_llm(self) -> LLM: Returns: An instance of the configured LLM type """ - # Combine common parameters with provider-specific parameters params = {} - if self.model: - params["model"] = self.model - if self.temperature is not None: - params["temperature"] = self.temperature - if self.max_tokens is not None: - params["max_tokens"] = self.max_tokens + # Combine common parameters with provider-specific parameters + # ... 
# Add any additional provider-specific parameters params.update(self.provider_params) @@ -80,54 +85,42 @@ class BasicAgentConfig(BaseModel): model_config = ConfigDict(arbitrary_types_allowed=True) -# Type-safe configs for each workflow pattern - - -class ParallelWorkflowConfig(BaseModel): +class ParallelLLMConfig(BaseModel): """Type-safe configuration for ParallelLLM workflow pattern.""" fan_in_agent: str # Name of the agent to use for fan-in fan_out_agents: List[str] # Names of agents to use for fan-out - concurrent: bool = True - synchronize_fan_out_models: bool = False extra_params: Dict[str, Any] = Field(default_factory=dict) -class OrchestratorWorkflowConfig(BaseModel): +class OrchestratorLLMConfig(BaseModel): """Type-safe configuration for Orchestrator workflow pattern.""" available_agents: List[str] # Names of agents available to the orchestrator - max_iterations: int = 10 planner_agent: Optional[str] = None # Optional custom planner agent extra_params: Dict[str, Any] = Field(default_factory=dict) -class RouterWorkflowConfig(BaseModel): +class RouterConfig(BaseModel): """Type-safe configuration for Router workflow pattern.""" agent_names: List[str] # Names of agents to route between top_k: int = 1 router_type: Literal["llm", "embedding"] = "llm" - embedding_model: Optional[str] = None # For embedding-based router extra_params: Dict[str, Any] = Field(default_factory=dict) -class EvaluatorOptimizerWorkflowConfig(BaseModel): +class EvaluatorOptimizerConfig(BaseModel): """Type-safe configuration for Evaluator-Optimizer workflow pattern.""" evaluator_agent: str # Name of the agent to use as evaluator optimizer_agent: str # Name of the agent to use as optimizer min_rating: str = "excellent" # Minimum quality rating to accept - max_iterations: int = 5 + max_refinements: int = 3 # Maximum number of refinements extra_params: Dict[str, Any] = Field(default_factory=dict) -class SwarmWorkflowConfig(BaseModel): - """Type-safe configuration for Swarm workflow pattern.""" - - agents: List[str] # Names of agents in the swarm - context_variables: Dict[str, Any] = Field(default_factory=dict) - extra_params: Dict[str, Any] = Field(default_factory=dict) +# endregion class AgentConfig(BaseModel): @@ -147,18 +140,17 @@ class AgentConfig(BaseModel): llm_config: Optional[AugmentedLLMConfig] = None # Workflow configuration - only one should be set - parallel_config: Optional[ParallelWorkflowConfig] = None - orchestrator_config: Optional[OrchestratorWorkflowConfig] = None - router_config: Optional[RouterWorkflowConfig] = None - evaluator_optimizer_config: Optional[EvaluatorOptimizerWorkflowConfig] = None - swarm_config: Optional[SwarmWorkflowConfig] = None + parallel_config: Optional[ParallelLLMConfig] = None + orchestrator_config: Optional[OrchestratorLLMConfig] = None + router_config: Optional[RouterConfig] = None + evaluator_optimizer_config: Optional[EvaluatorOptimizerConfig] = None # Additional kwargs extra_kwargs: Dict[str, Any] = Field(default_factory=dict) model_config = ConfigDict(arbitrary_types_allowed=True) - def create_agent(self, context=None) -> "Agent": + def create_agent(self, context: Optional["Context"] = None) -> "Agent": """ Create a basic agent instance. This doesn't initialize the agent or attach an LLM. @@ -182,7 +174,7 @@ def create_agent(self, context=None) -> "Agent": **self.extra_kwargs, ) - def get_workflow_type(self) -> Optional[str]: + def get_agent_type(self) -> Optional[str]: """ Get the type of workflow this agent uses, if any. 
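For reference, a declarative configuration built from these models might look like the sketch below; the agent names ("summarizer", "editor", "fact_checker") and the `"model"` key inside `provider_params` are assumptions for illustration.

```python
from mcp_agent.agents.agent_config import (
    AgentConfig,
    AugmentedLLMConfig,
    ParallelLLMConfig,
)
from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM

summarizer = AgentConfig(
    name="summarizer",
    instruction="Summarize the given text.",
    server_names=["fetch"],
    llm_config=AugmentedLLMConfig(
        factory=OpenAIAugmentedLLM,
        provider_params={"model": "gpt-4o"},  # provider-specific knobs
    ),
)

team = AgentConfig(
    name="team",
    instruction="Fan work out to specialists and merge the results.",
    parallel_config=ParallelLLMConfig(
        fan_in_agent="editor",  # assumed to be registered elsewhere
        fan_out_agents=["summarizer", "fact_checker"],
    ),
)
```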
@@ -210,7 +202,7 @@ def get_workflow_config(self) -> Optional[Any]:
         Returns:
             The appropriate workflow configuration object or None
         """
-        workflow_type = self.get_workflow_type()
+        workflow_type = self.get_agent_type()
         if workflow_type == "parallel":
             return self.parallel_config
         elif workflow_type == "orchestrator":
@@ -222,3 +214,132 @@ def get_workflow_config(self) -> Optional[Any]:
         elif workflow_type == "swarm":
             return self.swarm_config
         return None
+
+
+async def create_agent(name: str, context: "Context") -> "Agent":
+    """
+    Create an agent with its configured workflow.
+
+    Args:
+        name: The name of the registered agent configuration
+        context: The application context holding the registered AgentConfigs
+
+    Returns:
+        The initialized Agent instance, with its LLM or workflow attached when configured
+
+    Raises:
+        ValueError: If no agent configuration with the given name exists
+    """
+    agent_configs = context.app._agent_configs
+    if not agent_configs:
+        raise ValueError("No AgentConfigs were found")
+
+    # Check if the agent name is registered
+    config = agent_configs.get(name)
+    if not config:
+        raise ValueError(f"No agent configuration named '{name}' is registered")
+
+    # Create and initialize the agent
+    agent = config.create_agent(context=context)
+    await agent.initialize()
+
+    # Create and attach the AugmentedLLM workflow if applicable
+    workflow_type = config.get_agent_type()
+    llm_factory = config.llm_config.factory if config.llm_config else None
+
+    if workflow_type is None and config.llm_config is not None:
+        # Basic agent with simple LLM
+        llm_instance = await config.llm_config.create_llm()
+        await agent.attach_llm(llm=llm_instance)
+        return agent
+    elif workflow_type == "parallel":
+        from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM  # pylint: disable=C0415
+
+        parallel_config = config.parallel_config
+
+        # Get referenced agents
+        fan_in_agent = await create_agent(
+            name=parallel_config.fan_in_agent, context=context
+        )
+        fan_out_agents = []
+        for agent_name in parallel_config.fan_out_agents:
+            fan_out_agent = await create_agent(name=agent_name, context=context)
+            fan_out_agents.append(fan_out_agent)
+
+        # Create parallel workflow
+        parallel = ParallelLLM(
+            fan_in_agent=fan_in_agent,
+            fan_out_agents=fan_out_agents,
+            llm_factory=llm_factory,
+            context=context,
+            **parallel_config.extra_params,
+        )
+
+        # Attach the parallel workflow to the agent
+        await agent.attach_llm(llm=parallel)
+        return agent
+    elif workflow_type == "orchestrator":
+        from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator  # pylint: disable=C0415
+
+        orchestrator_config = config.orchestrator_config
+
+        # Get referenced agents
+        available_agents = []
+        for agent_name in orchestrator_config.available_agents:
+            available_agent = await create_agent(name=agent_name, context=context)
+            available_agents.append(available_agent)
+
+        # Optional planner agent
+        planner = None
+        if orchestrator_config.planner_agent:
+            planner = await create_agent(
+                name=orchestrator_config.planner_agent, context=context
+            )
+
+        # Create the orchestrator
+        orchestrator = Orchestrator(
+            llm_factory=llm_factory,
+            available_agents=available_agents,
+            planner=planner,
+            context=context,
+            **orchestrator_config.extra_params,
+        )
+
+        # Attach the orchestrator workflow to the agent
+        await agent.attach_llm(llm=orchestrator)
+        return agent
+    elif workflow_type == "evaluator_optimizer":
+        from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (  # pylint: disable=C0415
+            EvaluatorOptimizerLLM,
+            QualityRating,
+        )
+
+        eo_config = config.evaluator_optimizer_config
+
+        evaluator_agent = await create_agent(
+            name=eo_config.evaluator_agent, context=context
+        )
+        optimizer_agent = await create_agent(
+            name=eo_config.optimizer_agent, context=context
+        )
+
+        # Parse min_rating string to enum
+        min_rating = QualityRating.GOOD  # Default
+        try:
+            min_rating = QualityRating[eo_config.min_rating.upper()]
+        except (KeyError, AttributeError):
+            pass
+
+        # Create the evaluator-optimizer
+        eo = EvaluatorOptimizerLLM(
+            evaluator=evaluator_agent,
+            optimizer=optimizer_agent,
+            llm_factory=llm_factory,
+            min_rating=min_rating,
+            max_refinements=eo_config.max_refinements,
+            context=context,
+            **eo_config.extra_params,
+        )
+
+        # Attach the evaluator-optimizer workflow to the agent
+        await agent.attach_llm(llm=eo)
+        return agent
+    else:
+        # No workflow or LLM config, just return the basic agent
+        return agent
diff --git a/src/mcp_agent/app.py b/src/mcp_agent/app.py
index ec5576fdf..f49bac84c 100644
--- a/src/mcp_agent/app.py
+++ b/src/mcp_agent/app.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Type, TypeVar, Callable, Tuple, TYPE_CHECKING
+from typing import Any, Dict, Optional, Type, TypeVar, Callable, TYPE_CHECKING
 from datetime import timedelta
 import asyncio
 import sys
@@ -14,11 +14,10 @@
 from mcp_agent.human_input.types import HumanInputCallback
 from mcp_agent.human_input.handler import console_input_callback
 from mcp_agent.workflows.llm.llm_selector import ModelSelector
-from mcp_agent.agents.agent import Agent
-from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM

 if TYPE_CHECKING:
     from mcp_agent.agents.agent_config import AgentConfig
+    from mcp_agent.executor.workflow import Workflow

 R = TypeVar("R")

@@ -47,6 +46,7 @@ async def run(self):
     def __init__(
         self,
         name: str = "mcp_application",
+        description: str | None = None,
         settings: Optional[Settings] | str = None,
         human_input_callback: Optional[HumanInputCallback] = console_input_callback,
         signal_notification: Optional[SignalWaitCallback] = None,
@@ -57,14 +57,17 @@ def __init__(
         Initialize the application with a name and optional settings.

         Args:
             name: Name of the application
+            description: Description of the application. If you expose the MCPApp as an MCP server,
+                provide a detailed description, since it will be used as the server's description.
             settings: Application configuration - If unspecified, the settings are loaded from mcp_agent.config.yaml.
                 If this is a string, it is treated as the path to the config file to load.
             human_input_callback: Callback for handling human input
             signal_notification: Callback for getting notified on workflow signals/events.
-            upstream_session: Optional upstream session if the MCPApp is running as a server to an MCP client.
+            upstream_session: Upstream session if the MCPApp is running as a server to an MCP client.
             initialize_model_selector: Initializes the built-in ModelSelector to help with model selection. Defaults to False.
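As a small usage sketch of the new `description` parameter (the app name and wording are illustrative):

```python
from mcp_agent.app import MCPApp

# The description surfaces as the server's instructions when the app is
# exposed through create_mcp_server_for_app (see app_server.py below).
app = MCPApp(
    name="research_app",
    description="Agents and workflows for research and summarization tasks.",
)
```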
""" self.name = name + self.description = description or "MCP Agent Application" # We use these to initialize the context in initialize() self._config_or_path = settings @@ -73,7 +76,7 @@ def __init__( self._upstream_session = upstream_session self._model_selector = model_selector - self._workflows: Dict[str, Type] = {} # id to workflow class + self._workflows: Dict[str, Type["Workflow"]] = {} # id to workflow class self._agent_configs: Dict[ str, "AgentConfig" ] = {} # name to agent configuration @@ -354,69 +357,3 @@ def register_agent_config(self, config: "AgentConfig") -> "AgentConfig": """ self._agent_configs[config.name] = config return config - - async def create_agent(self, name: str) -> Tuple[Agent, Optional[AugmentedLLM]]: - """ - Create an agent from a registered configuration. - - This method can create both basic agents with LLMs and complex workflow patterns, - depending on the agent configuration. - - Args: - name: The name of the registered agent configuration - - Returns: - Tuple of (agent instance, LLM or workflow instance) - - Raises: - ValueError: If no agent configuration with that name exists - """ - - if name not in self._agent_configs: - raise ValueError(f"No agent configuration named '{name}' is registered") - - config = self._agent_configs[name] - - # Create the basic agent - agent = config.create_agent(context=self._context) - await agent.initialize() - - # Determine what kind of workflow/LLM to create - workflow_type = config.get_workflow_type() - - if workflow_type is None and config.llm_config is not None: - # Basic agent with an LLM - llm_instance = await config.llm_config.create_llm() - llm = await agent.attach_llm(lambda: llm_instance) - return agent, llm - - elif workflow_type == "parallel": - # Create a Parallel workflow - from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM - - # Get referenced agents - fan_in_agent, _ = await self.create_agent( - config.parallel_config.fan_in_agent - ) - fan_out_agents = [] - for agent_name in config.parallel_config.fan_out_agents: - fan_out_agent, _ = await self.create_agent(agent_name) - fan_out_agents.append(fan_out_agent) - - # Get LLM factory - llm_factory = config.llm_config.factory if config.llm_config else None - - # Create the workflow - parallel = ParallelLLM( - fan_in_agent=fan_in_agent, - fan_out_agents=fan_out_agents, - llm_factory=llm_factory, - **config.parallel_config.extra_params, - ) - - return agent, parallel - - # Handle other workflow types here... (simplified for now) - - # Default case - just return the agent without an LLM - return agent, None diff --git a/src/mcp_agent/app_server.py b/src/mcp_agent/app_server.py index 1208c74ce..0a4ce17c6 100644 --- a/src/mcp_agent/app_server.py +++ b/src/mcp_agent/app_server.py @@ -1,18 +1,16 @@ """ -MCPAgentServer - Exposes mcp-agent workflows and agents as MCP tools. +MCPAgentServer - Exposes MCPApp as MCP server, and +mcp-agent workflows and agents as MCP tools. 
""" -import asyncio -import inspect -import uuid +import json from collections.abc import AsyncIterator from contextlib import asynccontextmanager -from typing import Any, Dict, List, Literal, Optional, Type +from typing import Any, Dict, List, Type, TYPE_CHECKING from mcp.server.fastmcp import Context as MCPContext, FastMCP from mcp.server.fastmcp.exceptions import ToolError -from mcp.shared.session import BaseSession -from mcp.types import ToolListChangedNotification +from mcp.server.fastmcp.tools import Tool as FastTool from mcp_agent.app import MCPApp from mcp_agent.app_server_types import ( @@ -21,44 +19,59 @@ create_model_from_schema, ) from mcp_agent.agents.agent import Agent -from mcp_agent.agents.agent_config import AgentConfig +from mcp_agent.agents.agent_config import AgentConfig, create_agent from mcp_agent.config import MCPServerSettings from mcp_agent.context_dependent import ContextDependent -from mcp_agent.executor.workflow import Workflow -from mcp_agent.executor.workflow_signal import Signal +from mcp_agent.executor.workflow import Workflow, WorkflowRegistry from mcp_agent.logging.logger import get_logger from mcp_agent.mcp_server_registry import ServerRegistry from mcp_agent.workflows.llm.augmented_llm import MessageParamT, RequestParams +if TYPE_CHECKING: + from mcp_agent.context import Context + logger = get_logger(__name__) class ServerContext(ContextDependent): """Context object for the MCP App server.""" - def __init__(self, mcp: FastMCP, context=None, **kwargs): + def __init__(self, mcp: FastMCP, context: "Context", **kwargs): super().__init__(context=context, **kwargs) self.mcp = mcp - self.active_workflows: Dict[str, Any] = {} self.active_agents: Dict[str, Agent] = {} - # Register existing workflows from the app - for workflow_id, workflow_cls in self.context.app.workflows.items(): - self.register_workflow(workflow_id, workflow_cls) - - # Register existing agent configurations from the app - for name, config in self.context.app._agent_configs.items(): - logger.info(f"Registered agent config: {name}") - # Use the same tools for agent configs as for agent instances - # When the tools are called, we'll create the agent as needed - create_agent_specific_tools(self.mcp, self, config) - - def register_workflow(self, workflow_id: str, workflow_cls: Type[Workflow]): + # Initialize workflow registry if not already present + if not self.context.workflow_registry: + self.context.workflow_registry = WorkflowRegistry() + + # TODO: saqadri (MAC) - This shouldn't be needed here because + # in app_specific_lifespan we'll call create_agent_specific_tools + # and create_workflow_specific_tools respectively. + # # Register existing workflows from the app + # # Use the MCPApp's workflow registry as the source of truth for available workflows + # logger.info(f"Registering {len(self.context.app.workflows)} workflows") + # for workflow_name, workflow_cls in self.context.app.workflows.items(): + # logger.info(f"Registering workflow: {workflow_name}") + # self.register_workflow(workflow_name, workflow_cls) + + # # Register existing agent configurations from the app + # for name, config in self.context.app._agent_configs.items(): + # logger.info(f"Registered agent config: {name}") + # # Use the same tools for agent configs as for agent instances + # # When the tools are called, we'll create the agent as needed + # create_agent_specific_tools(self.mcp, self, config) + + # TODO: saqadri (MAC) - Do we need to notify the client that tools list changed? 
+        # Since this is at initialization time, we may not need to
+        # (depends on when the server reports that it's initialized/ready)

    def register_workflow(self, workflow_name: str, workflow_cls: Type[Workflow]):
        """Register a workflow class."""
-        if workflow_id not in self.context.app.workflows:
-            self.context.app.workflows[workflow_id] = workflow_cls
+        if workflow_name not in self.context.app.workflows:
+            self.context.app.workflows[workflow_name] = workflow_cls
            # Create tools for this workflow
-            create_workflow_specific_tools(self.mcp, workflow_id, workflow_cls)
+            create_workflow_specific_tools(self.mcp, workflow_name, workflow_cls)

    def register_agent(self, agent: Agent):
        """
@@ -82,14 +95,13 @@ def register_agent(self, agent: Agent):
     async def get_or_create_agent(self, name: str) -> Agent:
         """
-        Get an existing agent or create it from a registered configuration.
-        Handles creation of both basic agents and workflow-based agents.
+        Get an existing Agent or create it from a registered AgentConfig.

         Args:
-            name: The name of the agent or agent configuration
+            name: The name of the Agent/AgentConfig.

         Returns:
-            The agent instance (existing or newly created)
+            The Agent instance (existing or newly created)

         Raises:
             ToolError: If no agent or configuration with that name exists
@@ -99,45 +111,21 @@ async def get_or_create_agent(self, name: str) -> Agent:
             return self.active_agents[name]

         # Check if there's a configuration for this agent
-        if name in self.context.app._agent_configs:
+        agent_config = self.context.app._agent_configs.get(name)
+        if agent_config:
             try:
-                # Use the app's create_agent method which handles workflow setup and LLM attachment
-                agent, _ = await self.context.app.create_agent(name)
-
-                # Register the agent with the server context
+                agent = await create_agent(name=agent_config.name, context=self.context)
                 self.register_agent(agent)
-
                 return agent
             except Exception as e:
                 logger.error(f"Error creating agent {name}: {str(e)}")
-                raise ToolError(f"Failed to create agent {name}: {str(e)}")
+                raise ToolError(f"Failed to create agent {name}: {str(e)}") from e

         # Neither active nor configured
         raise ToolError(
             f"Agent not found: {name}. No active agent or configuration with this name exists."
         )

-    async def create_agent_from_config(self, name: str) -> Agent:
-        """
-        Create and register an agent from a registered configuration in the MCPApp.
-        This is a convenience method that delegates to get_or_create_agent.
-
-        Args:
-            name: The name of the agent configuration
-
-        Returns:
-            The created and initialized agent
-
-        Raises:
-            ValueError: If no agent configuration with that name exists
-        """
-        # Validate that this is actually a configuration, not an instance
-        if name not in self.context.app._agent_configs:
-            raise ValueError(f"No agent configuration named '{name}' is registered")
-
-        # Use the common get_or_create_agent method
-        return await self.get_or_create_agent(name)
-

 def create_mcp_server_for_app(app: MCPApp) -> FastMCP:
     """
@@ -173,14 +161,13 @@ async def app_specific_lifespan(mcp: FastMCP) -> AsyncIterator[ServerContext]:
     # Create FastMCP server with the app's name
     mcp = FastMCP(
         name=app.name or "mcp_agent_server",
-        # TODO: saqadri (MAC) - create a much more detailed description based on all the available agents and workflows,
+        # TODO: saqadri (MAC) - create a much more detailed description
+        # based on all the available agents and workflows,
         # or use the MCPApp's description if available.
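A minimal bootstrap for exposing an app this way might look like the sketch below; it mirrors the example server in this patch, with hypothetical names, and assumes FastMCP's stdio runner.

```python
from mcp_agent.app import MCPApp
from mcp_agent.app_server import create_mcp_server_for_app

app = MCPApp(name="workflow_server", description="Example workflow server.")

async def serve() -> None:
    # Run the app so its context (executor, registries) is initialized,
    # then expose it over stdio as an MCP server.
    async with app.run() as agent_app:
        mcp = create_mcp_server_for_app(agent_app)
        await mcp.run_stdio_async()
```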
- instructions=f"MCP server exposing {app.name} workflows and agents", + instructions=f"MCP server exposing {app.name} workflows and agents. Description: {app.description}", lifespan=app_specific_lifespan, ) - # region Server Tools - @mcp.tool(name="servers/list") def list_servers(ctx: MCPContext) -> List[MCPServerSettings]: """ @@ -212,11 +199,8 @@ def list_agents(ctx: MCPContext) -> Dict[str, Dict[str, Any]]: List all available agents with their detailed information. Returns information about each agent including their name, instruction, - the MCP servers they have access to, and if it uses a workflow pattern. + the MCP servers they have access to. This helps with understanding what each agent is designed to do before calling it. - - The list includes both active agent instances and agent configurations - that can be instantiated on-demand. """ server_context: ServerContext = ctx.request_context.lifespan_context server_registry = server_context.context.server_registry @@ -242,141 +226,10 @@ def list_agents(ctx: MCPContext) -> Dict[str, Dict[str, Any]]: f"agents/{name}/generate_str", f"agents/{name}/generate_structured", ], - "type": "instance", } - # Add agent configurations from the app (if not already active) - for name, config in server_context.context.app._agent_configs.items(): - if name not in result: # Skip if already added as active agent - # Format instruction - handle callable instructions - instruction = config.instruction - if callable(instruction): - instruction = instruction({}) - - servers = _get_server_descriptions(server_registry, config.server_names) - - # Get workflow type and additional info - workflow_type = config.get_workflow_type() - workflow_config = config.get_workflow_config() - - # Build detailed agent info - agent_info = { - "name": name, - "instruction": instruction, - "servers": servers, - "capabilities": ["generate", "generate_str", "generate_structured"], - "tool_endpoints": [ - f"agents/{name}/generate", - f"agents/{name}/generate_str", - f"agents/{name}/generate_structured", - ], - "type": "config", - } - - # Add workflow information if present - if workflow_type: - agent_info["workflow_type"] = workflow_type - if workflow_config: - # Include key details from the workflow config - # but exclude sensitive or callable fields - try: - config_dict = workflow_config.model_dump() - # Remove any callable fields - config_dict = { - k: v - for k, v in config_dict.items() - if not callable(v) and k != "extra_params" - } - agent_info["workflow_config"] = config_dict - except Exception as e: - logger.warning("Error getting workflow info: %s", str(e)) - - # Add LLM configuration if present - if config.llm_config: - try: - llm_info = { - "type": config.llm_config.factory.__name__ - if hasattr(config.llm_config.factory, "__name__") - else "custom", - "model": config.llm_config.model, - } - agent_info["llm_config"] = llm_info - except Exception as e: - logger.warning("Error getting LLM info: %s", str(e)) - - result[name] = agent_info - return result - @mcp.tool(name="agents/create") - async def create_agent( - ctx: MCPContext, - name: str, - instruction: str, - server_names: List[str], - llm: Literal["openai", "anthropic"] = "openai", - ) -> Dict[str, Any]: - """ - Create a new agent with given name, instruction and list of MCP servers it is allowed to access. - - Args: - name: The name of the agent to create. It must be a unique name not already in agents/list. - instruction: Instructions for the agent (i.e. system prompt). 
- server_names: List of MCP server names the agent should be able to access. - These MUST be one of the names retrieved using servers/list tool endpoint. - - Returns: - Detailed information about the created agent. - """ - server_context: ServerContext = ctx.request_context.lifespan_context - - agent = Agent( - name=name, - instruction=instruction, - server_names=server_names, - context=server_context.context, - ) - - # TODO: saqadri (MAC) - Add better support for multiple LLMs. - if llm == "openai": - from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM # pylint: disable=C0415 - - await agent.attach_llm(OpenAIAugmentedLLM) - elif llm == "anthropic": - from mcp_agent.workflows.llm.augmented_llm_anthropic import ( # pylint: disable=C0415 - AnthropicAugmentedLLM, - ) - - await agent.attach_llm(AnthropicAugmentedLLM) - else: - raise ToolError( - f"Unsupported LLM type: {llm}. Only 'openai' and 'anthropic' are presently supported." - ) - - await agent.initialize() - server_context.register_agent(agent) - - # Notify that tools have changed - session: BaseSession = ctx.session - session.send_notification( - ToolListChangedNotification(method="notifications/tools/list_changed") - ) - - server_registry = server_context.context.server_registry - servers = _get_server_descriptions(server_registry, agent.server_names) - - # Return detailed agent info - return { - "name": name, - "instruction": instruction, - "servers": servers, - "capabilities": ["generate", "generate_str", "generate_structured"], - "tool_endpoints": [ - f"agents/{name}/generate", - f"agents/{name}/generate_structured", - ], - } - @mcp.tool(name="agents/generate") async def agent_generate( ctx: MCPContext, @@ -389,9 +242,10 @@ async def agent_generate( This is similar to generating an LLM completion. Args: - agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. + agent_name: Name of the agent to use. + This must be one of the names retrieved using agents/list tool endpoint. message: The prompt to send to the agent. - request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + request_params: Optional parameters to configure the LLM generation. Returns: The generated response from the agent. @@ -411,9 +265,10 @@ async def agent_generate_str( use agents/generate_structured for results conforming to a specific schema. Args: - agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. + agent_name: Name of the agent to use. + This must be one of the names retrieved using agents/list tool endpoint. message: The prompt to send to the agent. - request_params: Optional parameters for the request, such as max_tokens and model/model preferences. + request_params: Optional parameters to configure the LLM generation. Returns: The generated response from the agent. @@ -432,11 +287,12 @@ async def agent_generate_structured( Generate a structured response from an agent that matches the given schema. Args: - agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. + agent_name: Name of the agent to use. + This must be one of the names retrieved using agents/list tool endpoint. message: The prompt to send to the agent. response_schema: The JSON schema that defines the shape to generate the response in. This schema can be generated using type.schema_json() for a Pydantic model. 
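To show how a caller would produce that schema, here is a small sketch; the `Weather` model and the agent name are illustrative, and on Pydantic v2 `model_json_schema()` is the current equivalent of the deprecated `schema_json()` mentioned above.

```python
from pydantic import BaseModel

class Weather(BaseModel):
    city: str
    temperature_c: float

# JSON schema dict suitable for the response_schema argument.
schema = Weather.model_json_schema()

# Hypothetical client-side call:
#   result = await session.call_tool(
#       "agents/forecaster/generate_structured",
#       arguments={"message": "Weather in Paris?", "response_schema": schema},
#   )
```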
-            request_params: Optional parameters for the request, such as max_tokens and model/model preferences.
+            request_params: Optional parameters to configure the LLM generation.
 
         Returns:
             A dictionary representation of the structured response.
@@ -481,179 +337,70 @@ async def agent_generate_structured(
     @mcp.tool(name="workflows/list")
     def list_workflows(ctx: MCPContext) -> Dict[str, Dict[str, Any]]:
         """
-        List all available workflows exposed with their detailed information.
-
-        Returns information about each workflow including name, description, and parameters.
+        List all available workflow types with their detailed information.
+        Returns information about each workflow type including name, description, and parameters.
         This helps in making an informed decision about which workflow to run.
         """
-        server_config: ServerContext = ctx.request_context.lifespan_context
+        server_context: ServerContext = ctx.request_context.lifespan_context
 
         result = {}
-        for workflow_id, workflow_cls in server_config.context.app.workflows.items():
+        for workflow_name, workflow_cls in server_context.context.app.workflows.items():
             # Get workflow documentation
-            doc = workflow_cls.__doc__ or "No description available"
-
-            # Get workflow run method parameters using inspection
-            parameters = {}
-            if hasattr(workflow_cls, "run"):
-                sig = inspect.signature(workflow_cls.run)
-                for param_name, param in sig.parameters.items():
-                    if param_name != "self":
-                        param_info = {
-                            "type": str(param.annotation)
-                            .replace("<class '", "").replace("'>", ""),
-                            "required": param.default == inspect.Parameter.empty,
-                        }
-                        if param.default != inspect.Parameter.empty:
-                            param_info["default"] = param.default
-                        parameters[param_name] = param_info
-
-            result[workflow_id] = {
-                "name": workflow_id,
-                "description": doc.strip(),
-                "parameters": parameters,
+            run_fn_tool = FastTool.from_function(workflow_cls.run)
+
+            # Define common endpoints for all workflows
+            endpoints = [
+                f"workflows/{workflow_name}/run",
+                f"workflows/{workflow_name}/get_status",
+            ]
+
+            result[workflow_name] = {
+                "name": workflow_name,
+                "description": workflow_cls.__doc__ or run_fn_tool.description,
                 "capabilities": ["run", "pause", "resume", "cancel", "get_status"],
-                "tool_endpoints": [
-                    f"workflows/{workflow_id}/run",
-                    f"workflows/{workflow_id}/get_status",
-                    f"workflows/{workflow_id}/pause",
-                    f"workflows/{workflow_id}/resume",
-                    f"workflows/{workflow_id}/cancel",
-                ],
+                "tool_endpoints": endpoints,
+                "run_parameters": run_fn_tool.parameters,
             }
 
         return result
 
-    @mcp.tool(name="workflows/list_running")
-    def list_running_workflows(ctx: MCPContext) -> Dict[str, Dict[str, Any]]:
+    @mcp.tool(name="workflows/runs/list")
+    def list_workflow_runs(ctx: MCPContext) -> List[Dict[str, Any]]:
        """
-        List all running workflow instances with their detailed status information.
+        List all workflow instances (runs) with their detailed status information.
 
+        This returns information about actual workflow instances (runs), not workflow types.
         For each running workflow, returns its ID, name, current state, and available operations.
         This helps in identifying and managing active workflow instances.
 
        Returns:
-            A dictionary mapping workflow IDs to their detailed status information.
+            A list of dictionaries with detailed status information for each workflow instance.
        """
-        server_config: ServerContext = ctx.request_context.lifespan_context
-
-        result = {}
-        for workflow_id, workflow in server_config.active_workflows.items():
-            # Skip task entries
-            if workflow_id.endswith("_task"):
-                continue
-
-            task = server_config.active_workflows.get(workflow_id + "_task")
-
-            # Get workflow information
-            workflow_info = {
-                "id": workflow_id,
-                "name": workflow.name,
-                "running": task is not None and not task.done() if task else False,
-                "state": workflow.state.model_dump()
-                if hasattr(workflow, "state")
-                else {},
-                "tool_endpoints": [
-                    f"workflows/{workflow.name}/get_status",
-                    f"workflows/{workflow.name}/pause",
-                    f"workflows/{workflow.name}/resume",
-                    f"workflows/{workflow.name}/cancel",
-                ],
-            }
-
-            if task and task.done():
-                try:
-                    task_result = task.result()
-                    workflow_info["result"] = (
-                        task_result.model_dump()
-                        if hasattr(task_result, "model_dump")
-                        else str(task_result)
-                    )
-                    workflow_info["completed"] = True
-                    workflow_info["error"] = None
-                except Exception as e:
-                    workflow_info["result"] = None
-                    workflow_info["completed"] = False
-                    workflow_info["error"] = str(e)
-
-            result[workflow_id] = workflow_info
+        server_context: ServerContext = ctx.request_context.lifespan_context
 
-        return result
+        # Get all workflow statuses from the registry
+        workflow_statuses = server_context.context.workflow_registry.list_workflows()
+        return workflow_statuses
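
The two listing tools above are ordinary MCP tools, so a client can discover what is available before starting anything. A minimal client-side sketch of that discovery step, assuming an already-initialized `mcp.ClientSession` named `session` connected to this server (session setup omitted):

```python
# Discover workflow types and currently tracked runs over MCP.
# Assumes `session` is a connected, initialized mcp.ClientSession.
types_result = await session.call_tool("workflows/list", arguments={})
runs_result = await session.call_tool("workflows/runs/list", arguments={})

# Each workflows/list entry carries a run_parameters JSON schema describing
# the arguments that workflows/run expects for that workflow type.
```
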
""" - server_config: ServerContext = ctx.request_context.lifespan_context - - result = {} - for workflow_id, workflow in server_config.active_workflows.items(): - # Skip task entries - if workflow_id.endswith("_task"): - continue - - task = server_config.active_workflows.get(workflow_id + "_task") - - # Get workflow information - workflow_info = { - "id": workflow_id, - "name": workflow.name, - "running": task is not None and not task.done() if task else False, - "state": workflow.state.model_dump() - if hasattr(workflow, "state") - else {}, - "tool_endpoints": [ - f"workflows/{workflow.name}/get_status", - f"workflows/{workflow.name}/pause", - f"workflows/{workflow.name}/resume", - f"workflows/{workflow.name}/cancel", - ], - } - - if task and task.done(): - try: - task_result = task.result() - workflow_info["result"] = ( - task_result.model_dump() - if hasattr(task_result, "model_dump") - else str(task_result) - ) - workflow_info["completed"] = True - workflow_info["error"] = None - except Exception as e: - workflow_info["result"] = None - workflow_info["completed"] = False - workflow_info["error"] = str(e) - - result[workflow_id] = workflow_info + server_context: ServerContext = ctx.request_context.lifespan_context - return result + # Get all workflow statuses from the registry + workflow_statuses = server_context.context.workflow_registry.list_workflows() + return workflow_statuses @mcp.tool(name="workflows/run") async def run_workflow( ctx: MCPContext, workflow_name: str, - args: Optional[Dict[str, Any]] = None, - ) -> Dict[str, Any]: + run_parameters: Dict[str, Any] | None = None, + ) -> str: """ Run a workflow with the given name. Args: workflow_name: The name of the workflow to run. - args: Optional arguments to pass to the workflow. + run_parameters: Arguments to pass to the workflow run. + workflows/list method will return the run_parameters schema for each workflow. Returns: - Information about the running workflow including its ID and metadata. + The workflow ID of the started workflow run, which can be passed to + workflows/get_status, workflows/pause, workflows/resume, and workflows/cancel. 
""" - server_config: ServerContext = ctx.request_context.lifespan_context - app = server_config.context.app - - if workflow_name not in app.workflows: - raise ValueError(f"Workflow '{workflow_name}' not found.") - - # Get the workflow class - workflow_cls = app.workflows[workflow_name] - - # Create and initialize the workflow instance using the factory method - try: - # Separate constructor args from run args if provided - run_args = args or {} - constructor_args = ( - run_args.pop("constructor_args", {}) - if isinstance(run_args, dict) - else {} - ) - - # Create workflow instance - workflow = await workflow_cls.create( - executor=app.executor, name=workflow_name, **constructor_args - ) - - # Generate a unique ID for this workflow instance - workflow_id = str(uuid.uuid4()) - - # Store the workflow instance - server_config.active_workflows[workflow_id] = workflow - - # Run the workflow in a separate task with cleanup handling - run_task = asyncio.create_task( - _run_workflow_and_cleanup( - workflow, run_args, workflow_id, server_config - ) - ) - - # Store the task to check status later - server_config.active_workflows[workflow_id + "_task"] = run_task - except Exception as e: - logger.error(f"Error creating workflow {workflow_name}: {str(e)}") - raise ValueError(f"Error creating workflow: {str(e)}") - - # Return information about the workflow - return { - "workflow_id": workflow_id, - "workflow_name": workflow_name, - "status": "running", - "args": args, - "tool_endpoints": [ - f"workflows/{workflow_name}/get_status", - f"workflows/{workflow_name}/pause", - f"workflows/{workflow_name}/resume", - f"workflows/{workflow_name}/cancel", - ], - "message": f"Workflow {workflow_name} started with ID {workflow_id}. Use the returned workflow_id with other workflow tools.", - } + return await _workflow_run(ctx, workflow_name, run_parameters) @mcp.tool(name="workflows/get_status") def get_workflow_status(ctx: MCPContext, workflow_id: str) -> Dict[str, Any]: @@ -664,120 +411,79 @@ def get_workflow_status(ctx: MCPContext, workflow_id: str) -> Dict[str, Any]: whether it's running or completed, and any results or errors encountered. Args: - workflow_id: The ID of the workflow to check. + workflow_id: The ID of the workflow to check, + received from workflows/run or workflows/runs/list. Returns: A dictionary with comprehensive information about the workflow status. 
""" - server_config: ServerContext = ctx.request_context.lifespan_context - - if workflow_id not in server_config.active_workflows: - raise ValueError(f"Workflow with ID '{workflow_id}' not found.") - - workflow = server_config.active_workflows[workflow_id] - task = server_config.active_workflows.get(workflow_id + "_task") - - status = { - "id": workflow_id, - "name": workflow.name, - "running": task is not None and not task.done() if task else False, - "state": workflow.state.model_dump() if hasattr(workflow, "state") else {}, - "available_actions": ["pause", "resume", "cancel"] - if task and not task.done() - else [], - "tool_endpoints": [ - f"workflows/{workflow.name}/get_status", - ], - } - - # Add appropriate action endpoints based on status - if task and not task.done(): - status["tool_endpoints"].extend( - [ - f"workflows/{workflow.name}/pause", - f"workflows/{workflow.name}/resume", - f"workflows/{workflow.name}/cancel", - ] - ) - - if task and task.done(): - try: - result = task.result() - - # Convert result to a useful format - if hasattr(result, "model_dump"): - result_data = result.model_dump() - elif hasattr(result, "__dict__"): - result_data = result.__dict__ - else: - result_data = str(result) - - status["result"] = result_data - status["completed"] = True - status["error"] = None - except Exception as e: - status["result"] = None - status["completed"] = False - status["error"] = str(e) - status["exception_type"] = type(e).__name__ - - return status + return _workflow_status(ctx, workflow_id) @mcp.tool(name="workflows/pause") - async def pause_workflow(ctx: MCPContext, workflow_id: str) -> bool: + async def pause_workflow( + ctx: MCPContext, + workflow_id: str, + ) -> bool: """ Pause a running workflow. Args: - workflow_id: The ID of the workflow to pause. + workflow_id: The ID of the workflow to pause, + received from workflows/run or workflows/runs/list. Returns: True if the workflow was paused, False otherwise. """ - server_config: ServerContext = ctx.request_context.lifespan_context + server_context: ServerContext = ctx.request_context.lifespan_context + workflow_registry = server_context.context.workflow_registry - if workflow_id not in server_config.active_workflows: - raise ValueError(f"Workflow with ID '{workflow_id}' not found.") + if not workflow_registry: + raise ToolError("Workflow registry not found for MCPApp Server.") - _workflow = server_config.active_workflows[workflow_id] + # Get the workflow instance from the registry + workflow = workflow_registry.get_workflow(workflow_id) + if not workflow: + raise ValueError(f"Workflow with ID '{workflow_id}' not found.") - # Signal the workflow to pause - try: - await server_config.context.app.executor.signal( - "pause", workflow_id=workflow_id - ) - return True - except Exception as e: - logger.error(f"Error pausing workflow {workflow_id}: {e}") - return False + # Pause the workflow directly + return await workflow.pause() @mcp.tool(name="workflows/resume") async def resume_workflow( - ctx: MCPContext, workflow_id: str, input_data: Optional[str] = None + ctx: MCPContext, + workflow_id: str, + signal_name: str | None = "resume", + payload: str | None = None, ) -> bool: """ Resume a paused workflow. Args: - workflow_id: The ID of the workflow to resume. - input_data: Optional input data to provide to the workflow. + workflow_id: The ID of the workflow to resume, + received from workflows/run or workflows/runs/list. + signal_name: Optional name of the signal to send to resume the workflow. 
+ This will default to "resume", but can be a custom signal name + if the workflow was paused on a specific signal. + payload: Optional payload to provide the workflow upon resumption. + For example, if a workflow is waiting for human input, + this can be the human input. Returns: True if the workflow was resumed, False otherwise. """ - server_config: ServerContext = ctx.request_context.lifespan_context + server_context: ServerContext = ctx.request_context.lifespan_context + workflow_registry = server_context.context.workflow_registry - if workflow_id not in server_config.active_workflows: - raise ValueError(f"Workflow with ID '{workflow_id}' not found.") + if not workflow_registry: + raise ToolError("Workflow registry not found for MCPApp Server.") - # Signal the workflow to resume - try: - signal = Signal(name="resume", workflow_id=workflow_id, payload=input_data) - await server_config.context.app.executor.signal_bus.signal(signal) - return True - except Exception as e: - logger.error(f"Error resuming workflow {workflow_id}: {e}") - return False + # Get the workflow instance from the registry + workflow = workflow_registry.get_workflow(workflow_id) + if not workflow: + raise ToolError(f"Workflow with ID '{workflow_id}' not found.") + + # Resume the workflow directly + return await workflow.resume(signal_name, payload) @mcp.tool(name="workflows/cancel") async def cancel_workflow(ctx: MCPContext, workflow_id: str) -> bool: @@ -790,148 +496,16 @@ async def cancel_workflow(ctx: MCPContext, workflow_id: str) -> bool: Returns: True if the workflow was cancelled, False otherwise. """ - server_config: ServerContext = ctx.request_context.lifespan_context - - if workflow_id not in server_config.active_workflows: - raise ValueError(f"Workflow with ID '{workflow_id}' not found.") - - task = server_config.active_workflows.get(workflow_id + "_task") - - if task and not task.done(): - # Cancel the task - task.cancel() - - # Signal the workflow to cancel - try: - await server_config.context.app.executor.signal( - "cancel", workflow_id=workflow_id - ) - - # Remove from active workflows - server_config.active_workflows.pop(workflow_id, None) - server_config.active_workflows.pop(workflow_id + "_task", None) - - return True - except Exception as e: - logger.error(f"Error cancelling workflow {workflow_id}: {e}") - return False - - return False - - @mcp.tool(name="workflow_signal/wait_for_signal") - async def wait_for_signal( - ctx: MCPContext, - signal_name: str, - workflow_id: str = None, - description: str = None, - timeout_seconds: int = None, - ) -> Dict[str, Any]: - """ - Provides information about a signal that a workflow is waiting for. - - This tool doesn't actually make the workflow wait (that's handled internally), - but it provides information about what signal is being waited for and how to - respond to it. - - Args: - signal_name: The name of the signal to wait for. - workflow_id: Optional workflow ID to associate with the signal. - description: Optional description of what the signal is for. - timeout_seconds: Optional timeout in seconds. - - Returns: - Information about the signal and how to respond to it. 
- """ - _server_context: ServerContext = ctx.request_context.lifespan_context - - # Inform about how to send the signal - return { - "signal_name": signal_name, - "workflow_id": workflow_id, - "description": description or f"Waiting for signal '{signal_name}'", - "status": "waiting_for_signal", - "timeout_seconds": timeout_seconds, - "instructions": "To respond to this signal, use the workflow_signal/send tool with the same signal_name and workflow_id.", - "related_tools": ["workflow_signal/send"], - } - - @mcp.tool(name="workflow_signal/send") - async def send_signal( - ctx: MCPContext, - signal_name: str, - workflow_id: str = None, - payload: Any = None, - ) -> Dict[str, bool]: - """ - Send a signal to a workflow. - - This can be used to respond to a workflow that is waiting for input or - to send a signal to control workflow execution. - - Args: - signal_name: The name of the signal to send. - workflow_id: Optional workflow ID to associate with the signal. - payload: Optional data to include with the signal. - - Returns: - Confirmation that the signal was sent. - """ - server_config: ServerContext = ctx.request_context.lifespan_context - executor = server_config.context.app.executor - - # Create and send the signal - signal = Signal(name=signal_name, workflow_id=workflow_id, payload=payload) - - try: - await executor.signal_bus.signal(signal) - return { - "success": True, - "message": f"Signal '{signal_name}' sent successfully", - } - except Exception as e: - logger.error(f"Error sending signal {signal_name}: {e}") - return {"success": False, "message": f"Error sending signal: {str(e)}"} - - @mcp.tool(name="workflows/wait_for_input") - async def workflow_wait_for_input( - ctx: MCPContext, workflow_id: str, description: str = "Provide input" - ) -> Dict[str, Any]: - """ - Get information about a workflow that is waiting for human input. - - This tool helps coordinate when a workflow is waiting for human input by - providing clear instructions on how to provide that input. - - Args: - workflow_id: The ID of the workflow. - description: Description of what input is needed. - - Returns: - Instructions on how to provide input to the waiting workflow. 
- """ - server_config: ServerContext = ctx.request_context.lifespan_context + server_context: ServerContext = ctx.request_context.lifespan_context + workflow_registry = server_context.context.workflow_registry - if workflow_id not in server_config.active_workflows: + # Get the workflow instance from the registry + workflow = workflow_registry.get_workflow(workflow_id) + if not workflow: raise ValueError(f"Workflow with ID '{workflow_id}' not found.") - workflow = server_config.active_workflows[workflow_id] - - # Provide more helpful information about how to send the input - return { - "workflow_id": workflow_id, - "workflow_name": workflow.name, - "description": description, - "status": "waiting_for_input", - "instructions": "To provide input, use workflows/resume with the workflow_id and input_data parameters.", - "example": { - "tool": "workflows/resume", - "args": { - "workflow_id": workflow_id, - "input_data": "Example input data", - }, - }, - "tool_endpoints": [f"workflows/{workflow.name}/resume"], - } + # Cancel the workflow directly + return await workflow.cancel() # endregion @@ -953,9 +527,12 @@ def create_agent_tools(mcp: FastMCP, server_context: ServerContext): for _, agent in server_context.active_agents.items(): create_agent_specific_tools(mcp, server_context, agent) + for _, agent_config in server_context.app._agent_configs.items(): + agent = server_context.get_or_create_agent(agent_config.name) + def create_agent_specific_tools( - mcp: FastMCP, server_context: ServerContext, agent_or_config: Agent | AgentConfig + mcp: FastMCP, server_context: ServerContext, agent: Agent | AgentConfig ): """ Create specific tools for a given agent instance or configuration. @@ -966,16 +543,14 @@ def create_agent_specific_tools( agent_or_config: Either an Agent instance or an AgentConfig """ # Extract common properties based on whether we have an Agent or AgentConfig - if isinstance(agent_or_config, Agent): - name = agent_or_config.name - instruction = agent_or_config.instruction - server_names = agent_or_config.server_names - workflow_type = None + if isinstance(agent, Agent): + name = agent.name + instruction = agent.instruction + server_names = agent.server_names else: # AgentConfig - name = agent_or_config.name - instruction = agent_or_config.instruction - server_names = agent_or_config.server_names - workflow_type = agent_or_config.get_workflow_type() + name = agent.name + instruction = agent.instruction + server_names = agent.server_names # Format instruction - handle callable instructions if callable(instruction): @@ -983,14 +558,11 @@ def create_agent_specific_tools( server_registry = server_context.context.server_registry - # Add workflow info to description if present - workflow_info = f" using {workflow_type} workflow" if workflow_type else "" - # Add generate* tools for this agent @mcp.tool( name=f"agents/{name}/generate", description=f""" - Run the '{name}' agent{workflow_info} using the given message. + Run the '{name}' agent using the given message. This is similar to generating an LLM completion. Agent Description: {instruction} @@ -1014,7 +586,7 @@ async def generate( @mcp.tool( name=f"agents/{name}/generate_str", description=f""" - Run the '{name}' agent{workflow_info} using the given message and return the response as a string. + Run the '{name}' agent using the given message and return the response as a string. Use agents/{name}/generate for results in the original format, and use agents/{name}/generate_structured for results conforming to a specific schema. 
@@ -1040,7 +612,7 @@
     @mcp.tool(
         name=f"agents/{name}/generate_structured",
         description=f"""
-    Run the '{name}' agent{workflow_info} using the given message and return a response that matches the given schema.
+    Run the '{name}' agent using the given message and return a response that matches the given schema.
 
     Use agents/{name}/generate for results in the original format, and
     use agents/{name}/generate_str for string result.
@@ -1103,346 +675,58 @@ async def generate_structured(
 # region per-Workflow Tools
 
 
-def create_workflow_tools(mcp: FastMCP, server_config: ServerContext):
+def create_workflow_tools(mcp: FastMCP, server_context: ServerContext):
     """
     Create workflow-specific tools for registered workflows.
     This is called at server start to register specific endpoints for each workflow.
     """
-    if not server_config:
+    if not server_context:
         logger.warning("Server config not available for creating workflow tools")
         return
 
-    for workflow_id, workflow_cls in server_config.context.app.workflows.items():
-        create_workflow_specific_tools(mcp, workflow_id, workflow_cls)
+    for workflow_name, workflow_cls in server_context.context.app.workflows.items():
+        create_workflow_specific_tools(mcp, workflow_name, workflow_cls)
 
 
-def create_workflow_specific_tools(mcp: FastMCP, workflow_id: str, workflow_cls: Type):
+def create_workflow_specific_tools(
+    mcp: FastMCP, workflow_name: str, workflow_cls: Type["Workflow"]
+):
     """Create specific tools for a given workflow."""
-    # Get workflow documentation
-    doc = workflow_cls.__doc__ or "No description available"
-    doc = doc.strip()
-
-    # Get workflow run method parameters using inspection
-    parameters = {}
-    if hasattr(workflow_cls, "run"):
-        sig = inspect.signature(workflow_cls.run)
-        for param_name, param in sig.parameters.items():
-            if param_name != "self":
-                param_info = {
-                    "type": str(param.annotation)
-                    .replace("<class '", "").replace("'>", ""),
-                    "required": param.default == inspect.Parameter.empty,
-                }
-                if param.default != inspect.Parameter.empty:
-                    param_info["default"] = param.default
-                parameters[param_name] = param_info
-
-    # Create a run tool for this workflow
-    @mcp.tool(name=f"workflows/{workflow_id}/run")
-    async def workflow_specific_run(
-        ctx: MCPContext,
-        args: Optional[Dict[str, Any]] = None,
-    ) -> Dict[str, Any]:
-        """Run the workflow with the given arguments."""
-        server_config: ServerContext = ctx.request_context.lifespan_context
-        app = server_config.context.app
-
-        if workflow_id not in app.workflows:
-            raise ValueError(f"Workflow '{workflow_id}' not found.")
+    run_fn_tool = FastTool.from_function(workflow_cls.run)
+    run_fn_tool_params = json.dumps(run_fn_tool.parameters, indent=2)
 
-        # Create workflow instance using the factory method
-        try:
-            # Separate constructor args from run args if provided
-            run_args = args or {}
-            constructor_args = (
-                run_args.pop("constructor_args", {})
-                if isinstance(run_args, dict)
-                else {}
-            )
-
-            # Create and initialize workflow
-            workflow = await workflow_cls.create(
-                executor=app.executor, name=workflow_id, **constructor_args
-            )
-
-            # Generate workflow instance ID
-            instance_id = str(uuid.uuid4())
-
-            # Store workflow instance
-            server_config.active_workflows[instance_id] = workflow
-
-            # Run workflow in separate task with cleanup handling
-            run_task = asyncio.create_task(
-                _run_workflow_and_cleanup(
-                    workflow, run_args, instance_id, server_config
-                )
-            )
-
-            # Store task
-            server_config.active_workflows[instance_id + "_task"] = run_task
-        except Exception as e:
-            logger.error(f"Error creating 
workflow {workflow_id}: {str(e)}") - raise ValueError(f"Error creating workflow: {str(e)}") - - # Return information about the workflow - return { - "workflow_id": instance_id, - "workflow_name": workflow_id, - "status": "running", - "args": args, - "tool_endpoints": [ - f"workflows/{workflow_id}/get_status", - f"workflows/{workflow_id}/pause", - f"workflows/{workflow_id}/resume", - f"workflows/{workflow_id}/cancel", - ], - "message": f"Workflow {workflow_id} started with ID {instance_id}. Use the returned workflow_id with other workflow tools.", - } - - # Format parameter documentation - param_docs = [] - for param_name, param_info in parameters.items(): - default_info = ( - f" (default: {param_info.get('default', 'required')})" - if not param_info.get("required", True) - else "" - ) - param_docs.append( - f"- {param_name}: {param_info.get('type', 'Any')}{default_info}" - ) + @mcp.tool( + name=f"workflows/{workflow_name}/run", + description=f""" + Run the '{workflow_name}' workflow and get a workflow ID back. + Workflow Description: {workflow_cls.__doc__} - param_doc_str = "\n".join(param_docs) if param_docs else "- No parameters required" - - # Update the docstring - workflow_specific_run.__doc__ = f""" - Run the {workflow_id} workflow. - - Description: {doc} - - Parameters: - {param_doc_str} - - Args: - args: Dictionary containing the parameters for the workflow. - - Returns: - Information about the running workflow including its ID and metadata. - """ + {run_fn_tool.description} - # Create a status tool for this workflow - @mcp.tool(name=f"workflows/{workflow_id}/get_status") - def workflow_specific_status( - ctx: MCPContext, workflow_instance_id: str + Args: + run_parameters: Dictionary of parameters for the workflow run. + The schema for these parameters is as follows: + {run_fn_tool_params} + """, + ) + async def run( + ctx: MCPContext, + run_parameters: Dict[str, Any] | None = None, ) -> Dict[str, Any]: - """Get the status of a running workflow instance.""" - server_config: ServerContext = ctx.request_context.lifespan_context - - if workflow_instance_id not in server_config.active_workflows: - raise ValueError(f"Workflow instance '{workflow_instance_id}' not found.") - - workflow = server_config.active_workflows[workflow_instance_id] - if workflow_id != workflow.name: - raise ValueError( - f"Workflow instance '{workflow_instance_id}' is not a {workflow_id} workflow." 
- ) - - task = server_config.active_workflows.get(workflow_instance_id + "_task") - - status = { - "id": workflow_instance_id, - "name": workflow.name, - "running": task is not None and not task.done() if task else False, - "state": workflow.state.model_dump() if hasattr(workflow, "state") else {}, - "available_actions": ["pause", "resume", "cancel"] - if task and not task.done() - else [], - "tool_endpoints": [ - f"workflows/{workflow_id}/get_status", - ], - } - - # Add appropriate action endpoints based on status - if task and not task.done(): - status["tool_endpoints"].extend( - [ - f"workflows/{workflow_id}/pause", - f"workflows/{workflow_id}/resume", - f"workflows/{workflow_id}/cancel", - ] - ) - - if task and task.done(): - try: - result = task.result() - - # Convert result to a useful format - if hasattr(result, "model_dump"): - result_data = result.model_dump() - elif hasattr(result, "__dict__"): - result_data = result.__dict__ - else: - result_data = str(result) - - status["result"] = result_data - status["completed"] = True - status["error"] = None - except Exception as e: - status["result"] = None - status["completed"] = False - status["error"] = str(e) - status["exception_type"] = type(e).__name__ - - return status - - # Update the docstring - workflow_specific_status.__doc__ = f""" - Get the status of a running {workflow_id} workflow instance. - - Description: {doc} - - Args: - workflow_instance_id: The ID of the workflow instance to check. - - Returns: - A dictionary with detailed information about the workflow status. - """ - - # Create a pause tool for this workflow - @mcp.tool(name=f"workflows/{workflow_id}/pause") - async def workflow_specific_pause( - ctx: MCPContext, workflow_instance_id: str - ) -> bool: - """Pause a running workflow instance.""" - server_config: ServerContext = ctx.request_context.lifespan_context - - if workflow_instance_id not in server_config.active_workflows: - raise ValueError(f"Workflow instance '{workflow_instance_id}' not found.") - - workflow = server_config.active_workflows[workflow_instance_id] - if workflow_id != workflow.name: - raise ValueError( - f"Workflow instance '{workflow_instance_id}' is not a {workflow_id} workflow." - ) - - # Signal workflow to pause - try: - await server_config.context.app.executor.signal( - "pause", workflow_id=workflow_instance_id - ) - return True - except Exception as e: - logger.error(f"Error pausing workflow {workflow_instance_id}: {e}") - return False - - # Update the docstring - workflow_specific_pause.__doc__ = f""" - Pause a running {workflow_id} workflow instance. - - Description: {doc} - - Args: - workflow_instance_id: The ID of the workflow instance to pause. - - Returns: - True if the workflow was paused, False otherwise. - """ - - # Create a resume tool for this workflow - @mcp.tool(name=f"workflows/{workflow_id}/resume") - async def workflow_specific_resume( - ctx: MCPContext, workflow_instance_id: str, input_data: Optional[str] = None - ) -> bool: - """Resume a paused workflow instance.""" - server_config: ServerContext = ctx.request_context.lifespan_context - - if workflow_instance_id not in server_config.active_workflows: - raise ValueError(f"Workflow instance '{workflow_instance_id}' not found.") - - workflow = server_config.active_workflows[workflow_instance_id] - if workflow_id != workflow.name: - raise ValueError( - f"Workflow instance '{workflow_instance_id}' is not a {workflow_id} workflow." 
- ) - - # Signal workflow to resume - try: - signal = Signal( - name="resume", workflow_id=workflow_instance_id, payload=input_data - ) - await server_config.context.app.executor.signal_bus.signal(signal) - return True - except Exception as e: - logger.error(f"Error resuming workflow {workflow_instance_id}: {e}") - return False - - # Update the docstring - workflow_specific_resume.__doc__ = f""" - Resume a paused {workflow_id} workflow instance. - - Description: {doc} - - Args: - workflow_instance_id: The ID of the workflow instance to resume. - input_data: Optional input data to provide to the workflow. - - Returns: - True if the workflow was resumed, False otherwise. - """ + return await _workflow_run(ctx, workflow_name, run_parameters) - # Create a cancel tool for this workflow - @mcp.tool(name=f"workflows/{workflow_id}/cancel") - async def workflow_specific_cancel( - ctx: MCPContext, workflow_instance_id: str - ) -> bool: - """Cancel a running workflow instance.""" - server_config: ServerContext = ctx.request_context.lifespan_context - - if workflow_instance_id not in server_config.active_workflows: - raise ValueError(f"Workflow instance '{workflow_instance_id}' not found.") - - workflow = server_config.active_workflows[workflow_instance_id] - if workflow_id != workflow.name: - raise ValueError( - f"Workflow instance '{workflow_instance_id}' is not a {workflow_id} workflow." - ) - - task = server_config.active_workflows.get(workflow_instance_id + "_task") - - if task and not task.done(): - # Cancel task - task.cancel() - - # Signal workflow to cancel - try: - await server_config.context.app.executor.signal( - "cancel", workflow_id=workflow_instance_id - ) - - # Remove from active workflows - server_config.active_workflows.pop(workflow_instance_id, None) - server_config.active_workflows.pop(workflow_instance_id + "_task", None) - - return True - except Exception as e: - logger.error(f"Error cancelling workflow {workflow_instance_id}: {e}") - return False - - return False - - # Update the docstring - workflow_specific_cancel.__doc__ = f""" - Cancel a running {workflow_id} workflow instance. - - Description: {doc} - - Args: - workflow_instance_id: The ID of the workflow instance to cancel. + @mcp.tool( + name=f"workflows/{workflow_name}/get_status", + description=f""" + Get the status of a running {workflow_name} workflow. - Returns: - True if the workflow was cancelled, False otherwise. - """ + Args: + workflow_id: The ID of the running workflow, received from workflows/{workflow_name}/run. + """, + ) + def get_status(ctx: MCPContext, workflow_id: str) -> Dict[str, Any]: + return _workflow_status(ctx, workflow_id, workflow_name) # endregion @@ -1454,7 +738,7 @@ def _get_server_descriptions( servers: List[dict[str, str]] = [] if server_registry: for server_name in server_names: - config = server_registry.get_server_config(server_name) + config = server_registry.get_server_context(server_name) if config: servers.append( { @@ -1487,24 +771,15 @@ def _get_server_descriptions_as_string( return "\n".join(server_strings) +# region Agent Utils + + async def _agent_generate( ctx: MCPContext, agent_name: str, message: str | MCPMessageParam | List[MCPMessageParam], request_params: RequestParams | None = None, ) -> List[MCPMessageResult]: - """ - Run an agent using the given message. - This is similar to generating an LLM completion. - - Args: - agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. - message: The prompt to send to the agent. 
- request_params: Optional parameters for the request, such as max_tokens and model/model preferences. - - Returns: - The generated response from the agent. - """ server_context: ServerContext = ctx.request_context.lifespan_context # Get or create the agent - this will automatically create agent from config if needed @@ -1542,19 +817,6 @@ async def _agent_generate_str( message: str | MCPMessageParam | List[MCPMessageParam], request_params: RequestParams | None = None, ) -> str: - """ - Run an agent using the given message and return the response as a string. - Use agents/generate for results in the original format, and - use agents/generate_structured for results conforming to a specific schema. - - Args: - agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. - message: The prompt to send to the agent. - request_params: Optional parameters for the request, such as max_tokens and model/model preferences. - - Returns: - The generated response from the agent. - """ server_context: ServerContext = ctx.request_context.lifespan_context # Get or create the agent - this will automatically create agent from config if needed @@ -1593,48 +855,6 @@ async def _agent_generate_structured( response_schema: Dict[str, Any], request_params: RequestParams | None = None, ) -> Dict[str, Any]: - """ - Generate a structured response from an agent that matches the given schema. - - Args: - agent_name: Name of the agent to use. This must be one of the names retrieved using agents/list tool endpoint. - message: The prompt to send to the agent. - response_schema: The JSON schema that defines the shape to generate the response in. - This schema can be generated using type.schema_json() for a Pydantic model. - request_params: Optional parameters for the request, such as max_tokens and model/model preferences. - - Returns: - A dictionary representation of the structured response. - - Example: - response_schema: - { - "title": "UserProfile", - "type": "object", - "properties": { - "name": { - "title": "Name", - "type": "string" - }, - "age": { - "title": "Age", - "type": "integer", - "minimum": 0 - }, - "email": { - "title": "Email", - "type": "string", - "format": "email", - "pattern": "^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$" - } - }, - "required": [ - "name", - "age", - "email" - ] - } - """ server_context: ServerContext = ctx.request_context.lifespan_context # Get or create the agent - this will automatically create agent from config if needed @@ -1672,33 +892,64 @@ async def _agent_generate_structured( return result.model_dump(mode="json") -async def _run_workflow_and_cleanup(workflow, run_args, instance_id, server_context): - """ - Run a workflow and ensure proper cleanup regardless of outcome. 
+# endregion - Args: - workflow: The workflow instance to run - run_args: Arguments to pass to the workflow's run method - instance_id: The unique ID for this workflow instance - server_context: The server context for managing active workflows +# region Workflow Utils - Returns: - The result from the workflow's run method - """ + +async def _workflow_run( + ctx: MCPContext, + workflow_name: str, + run_parameters: Dict[str, Any] | None = None, +) -> str: + server_context: ServerContext = ctx.request_context.lifespan_context + app = server_context.context.app + + if workflow_name not in app.workflows: + raise ToolError(f"Workflow '{workflow_name}' not found.") + + # Get the workflow class + workflow_cls = app.workflows[workflow_name] + + # Create and initialize the workflow instance using the factory method try: - # Run the workflow - result = await workflow.run(**run_args) - return result + # Create workflow instance + workflow = await workflow_cls.create(name=workflow_name, context=app.context) + + run_parameters = run_parameters or {} + + # Run the workflow asynchronously and get its ID + workflow_id = await workflow.run_async(**run_parameters) + return workflow_id + except Exception as e: - # Log and propagate exceptions - logger.error(f"Error in workflow {workflow.name} (ID: {instance_id}): {str(e)}") - raise - finally: - try: - # Always attempt to clean up the workflow - await workflow.cleanup() - except Exception as cleanup_error: - # Log but don't fail if cleanup fails - logger.error( - f"Error cleaning up workflow {workflow.name} (ID: {instance_id}): {str(cleanup_error)}" - ) + logger.error(f"Error creating workflow {workflow_name}: {str(e)}") + raise ToolError(f"Error creating workflow {workflow_name}: {str(e)}") from e + + +def _workflow_status( + ctx: MCPContext, workflow_id: str, workflow_name: str | None = None +) -> Dict[str, Any]: + server_context: ServerContext = ctx.request_context.lifespan_context + workflow_registry = server_context.context.workflow_registry + + if not workflow_registry: + raise ToolError("Workflow registry not found for MCPApp Server.") + + # Get the workflow instance from the registry + workflow = workflow_registry.get_workflow(workflow_id) + if not workflow: + raise ToolError(f"Workflow with ID '{workflow_id}' not found.") + + if workflow_name and workflow.name != workflow_name: + raise ToolError( + f"Workflow with ID '{workflow_id}' is not a {workflow_name} workflow." 
+ ) + + # Get the status directly from the workflow instance + status = workflow.get_status() + + return status + + +# endregion diff --git a/src/mcp_agent/context.py b/src/mcp_agent/context.py index 4c22c9f1a..dc3d454d0 100644 --- a/src/mcp_agent/context.py +++ b/src/mcp_agent/context.py @@ -40,11 +40,13 @@ if TYPE_CHECKING: from mcp_agent.human_input.types import HumanInputCallback from mcp_agent.executor.workflow_signal import SignalWaitCallback + from mcp_agent.executor.workflow import WorkflowRegistry from mcp_agent.app import MCPApp else: # Runtime placeholders for the types HumanInputCallback = Any SignalWaitCallback = Any + WorkflowRegistry = Any MCPApp = Any logger = get_logger(__name__) @@ -69,6 +71,7 @@ class Context(BaseModel): server_registry: Optional[ServerRegistry] = None task_registry: Optional[ActivityRegistry] = None decorator_registry: Optional[DecoratorRegistry] = None + workflow_registry: Optional["WorkflowRegistry"] = None tracer: Optional[trace.Tracer] = None @@ -191,6 +194,11 @@ async def initialize_context( context.config = config context.server_registry = ServerRegistry(config=config) + # Import here to avoid circular imports + from mcp_agent.executor.workflow import WorkflowRegistry + + context.workflow_registry = WorkflowRegistry() + context.session_id = session_id # Configure logging and telemetry diff --git a/src/mcp_agent/executor/executor.py b/src/mcp_agent/executor/executor.py index 3449f4f4b..1e92aa942 100644 --- a/src/mcp_agent/executor/executor.py +++ b/src/mcp_agent/executor/executor.py @@ -139,12 +139,22 @@ async def signal( signal_name: str, payload: SignalValueT = None, signal_description: str | None = None, + workflow_id: str | None = None, ) -> None: """ Emit a signal. + + Args: + signal_name: The name of the signal to emit + payload: Optional data to include with the signal + signal_description: Optional human-readable description + workflow_id: Optional workflow ID to target the signal to """ signal = Signal[SignalValueT]( - name=signal_name, payload=payload, description=signal_description + name=signal_name, + payload=payload, + description=signal_description, + workflow_id=workflow_id, ) await self.signal_bus.signal(signal) @@ -224,7 +234,7 @@ async def run_task(task: Callable[..., R] | Coroutine[Any, Any, R]) -> R: return result except Exception as e: - # TODO: saqadri - adding logging or other error handling here + logger.error(f"Error executing task: {e}") return e if self._activity_semaphore: @@ -238,10 +248,26 @@ async def execute( *tasks: Callable[..., R] | Coroutine[Any, Any, R], **kwargs: Any, ) -> List[R | BaseException]: + """ + Execute a list of tasks and return their results. + + Args: + *tasks: The tasks to execute + **kwargs: Additional arguments to pass to the tasks + + Returns: + A list of results or exceptions + """ # TODO: saqadri - validate if async with self.execution_context() is needed here async with self.execution_context(): return await asyncio.gather( - *(self._execute_task(task, **kwargs) for task in tasks), + *( + self._execute_task( + task, + **kwargs, + ) + for task in tasks + ), return_exceptions=True, ) @@ -250,11 +276,26 @@ async def execute_streaming( *tasks: List[Callable[..., R] | Coroutine[Any, Any, R]], **kwargs: Any, ) -> AsyncIterator[R | BaseException]: + """ + Execute tasks and yield results as they complete. 
+ + Args: + *tasks: The tasks to execute + **kwargs: Additional arguments to pass to the tasks + + Yields: + Results or exceptions as tasks complete + """ # TODO: saqadri - validate if async with self.execution_context() is needed here async with self.execution_context(): # Create futures for all tasks futures = [ - asyncio.create_task(self._execute_task(task, **kwargs)) + asyncio.create_task( + self._execute_task( + task, + **kwargs, + ) + ) for task in tasks ] pending = set(futures) @@ -271,8 +312,9 @@ async def signal( signal_name: str, payload: SignalValueT = None, signal_description: str | None = None, + workflow_id: str | None = None, ) -> None: - await super().signal(signal_name, payload, signal_description) + await super().signal(signal_name, payload, signal_description, workflow_id) async def wait_for_signal( self, diff --git a/src/mcp_agent/executor/workflow.py b/src/mcp_agent/executor/workflow.py index 166da38dc..592716c0a 100644 --- a/src/mcp_agent/executor/workflow.py +++ b/src/mcp_agent/executor/workflow.py @@ -1,21 +1,186 @@ from abc import ABC, abstractmethod -from datetime import datetime +from datetime import datetime, timezone from typing import ( Any, Dict, Generic, + Optional, TypeVar, Union, + List, + TYPE_CHECKING, ) +import uuid + from pydantic import BaseModel, ConfigDict, Field -from mcp_agent.executor.executor import Executor +from mcp_agent.context_dependent import ContextDependent +from mcp_agent.executor.workflow_signal import Signal from mcp_agent.logging.logger import get_logger +if TYPE_CHECKING: + from mcp_agent.context import Context + import asyncio + T = TypeVar("T") +class WorkflowRegistry: + """ + Registry for tracking workflow instances. + Provides a central place to register, look up, and manage workflow instances. + """ + + def __init__(self): + self._workflows: Dict[str, "Workflow"] = {} + self._tasks: Dict[str, "asyncio.Task"] = {} + self._logger = get_logger("workflow.registry") + + def register( + self, + workflow_id: str, + workflow: "Workflow", + task: Optional["asyncio.Task"] = None, + ) -> None: + """ + Register a workflow instance and its associated task. + + Args: + workflow_id: The unique ID for the workflow + workflow: The workflow instance + task: The asyncio task running the workflow + """ + self._workflows[workflow_id] = workflow + if task: + self._tasks[workflow_id] = task + + def unregister(self, workflow_id: str) -> None: + """ + Remove a workflow instance from the registry. + + Args: + workflow_id: The unique ID for the workflow + """ + self._workflows.pop(workflow_id, None) + self._tasks.pop(workflow_id, None) + + def get_workflow(self, workflow_id: str) -> Optional["Workflow"]: + """ + Get a workflow instance by ID. + + Args: + workflow_id: The unique ID for the workflow + + Returns: + The workflow instance, or None if not found + """ + return self._workflows.get(workflow_id) + + def get_task(self, workflow_id: str) -> Optional["asyncio.Task"]: + """ + Get the task for a workflow instance by ID. + + Args: + workflow_id: The unique ID for the workflow + + Returns: + The asyncio task, or None if not found + """ + return self._tasks.get(workflow_id) + + async def pause_workflow(self, workflow_id: str) -> bool: + """ + Pause a workflow by ID. 
+
+        Args:
+            workflow_id: The unique ID of the workflow to pause
+
+        Returns:
+            True if the workflow was paused successfully, False otherwise
+        """
+        workflow = self.get_workflow(workflow_id)
+        if not workflow:
+            self._logger.error(
+                f"Cannot pause workflow {workflow_id}: workflow not found"
+            )
+            return False
+
+        return await workflow.pause()
+
+    async def resume_workflow(
+        self, workflow_id: str, input_data: Optional[str] = None
+    ) -> bool:
+        """
+        Resume a workflow by ID.
+
+        Args:
+            workflow_id: The unique ID of the workflow to resume
+            input_data: Optional data to provide to the workflow
+
+        Returns:
+            True if the workflow was resumed successfully, False otherwise
+        """
+        workflow = self.get_workflow(workflow_id)
+        if not workflow:
+            self._logger.error(
+                f"Cannot resume workflow {workflow_id}: workflow not found"
+            )
+            return False
+
+        return await workflow.resume(payload=input_data)
+
+    async def cancel_workflow(self, workflow_id: str) -> bool:
+        """
+        Cancel a workflow by ID.
+
+        Args:
+            workflow_id: The unique ID of the workflow to cancel
+
+        Returns:
+            True if the workflow was cancelled successfully, False otherwise
+        """
+        workflow = self.get_workflow(workflow_id)
+        if not workflow:
+            self._logger.error(
+                f"Cannot cancel workflow {workflow_id}: workflow not found"
+            )
+            return False
+
+        return await workflow.cancel()
+
+    def get_workflow_status(self, workflow_id: str) -> Optional[Dict[str, Any]]:
+        """
+        Get the status of a workflow by ID.
+
+        Args:
+            workflow_id: The unique ID of the workflow to check
+
+        Returns:
+            The workflow status if found, None otherwise
+        """
+        workflow = self.get_workflow(workflow_id)
+        if not workflow:
+            return None
+
+        return workflow.get_status()
+
+    def list_workflows(self) -> List[Dict[str, Any]]:
+        """
+        List all registered workflow instances with their status.
+
+        Returns:
+            A list of dictionaries with workflow information
+        """
+        result = []
+        for workflow in self._workflows.values():
+            # Get the workflow status directly to have consistent behavior
+            status = workflow.get_status()
+            result.append(status)
+
+        return result
+
+
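
To make the registry's contract concrete, here is a minimal sketch of the intended flow; `MyWorkflow` is an illustrative subclass, and in practice `run_async` performs this registration automatically when a `workflow_registry` is present on the context:

```python
# Illustrative registry usage (normally done for you by Workflow.run_async).
registry = WorkflowRegistry()

workflow = await MyWorkflow.create(name="my_workflow")
workflow_id = await workflow.run_async()   # schedules the run, returns an ID
registry.register(workflow_id, workflow)   # track the live instance

print(registry.get_workflow_status(workflow_id))
await registry.cancel_workflow(workflow_id)  # delegates to workflow.cancel()
```
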
""" def __init__( self, - executor: Executor, name: str | None = None, metadata: Dict[str, Any] | None = None, + context: Optional["Context"] = None, **kwargs: Any, ): - self.executor = executor + # Initialize the ContextDependent mixin + ContextDependent.__init__(self, context=context) + self.name = name or self.__class__.__name__ self.init_kwargs = kwargs self._logger = get_logger(f"workflow.{self.name}") self._initialized = False + self._workflow_id = None + self._run_task = None # A simple workflow state object # If under Temporal, storing it as a field on this class # means it can be replayed automatically self.state = WorkflowState(metadata=metadata or {}) + @property + def executor(self): + """Get the workflow executor from the context.""" + executor = self.context.executor + if executor is None: + raise ValueError("No executor available in context") + return executor + + @property + def id(self) -> str | None: + """ + Get the workflow ID if it has been assigned. + NOTE: The run() method will assign a new workflow ID on every run. + """ + return self._workflow_id + @classmethod async def create( - cls, executor: Executor, name: str | None = None, **kwargs: Any + cls, name: str | None = None, context: Optional["Context"] = None, **kwargs: Any ) -> "Workflow": """ Factory method to create and initialize a workflow instance. @@ -90,14 +274,14 @@ async def create( Subclasses can override this method for custom initialization logic. Args: - executor: The executor to use for this workflow name: Optional name for the workflow (defaults to class name) + context: Optional context to use (falls back to global context if not provided) **kwargs: Additional parameters to pass to the workflow constructor Returns: An initialized workflow instance """ - workflow = cls(executor=executor, name=name, **kwargs) + workflow = cls(name=name, context=context, **kwargs) await workflow.initialize() return workflow @@ -115,6 +299,289 @@ async def run(self, *args: Any, **kwargs: Any) -> "WorkflowResult[T]": WorkflowResult containing the output of the workflow """ + async def run_async(self, *args: Any, **kwargs: Any) -> str: + """ + Run the workflow asynchronously and return a workflow ID. + + This creates an async task that will be executed through the executor + and returns immediately with a workflow ID that can be used to + check status, pause, resume, or cancel. 
+    async def run_async(self, *args: Any, **kwargs: Any) -> str:
+        """
+        Run the workflow asynchronously and return a workflow ID.
+
+        This creates an async task that will be executed through the executor
+        and returns immediately with a workflow ID that can be used to
+        check status, pause, resume, or cancel.
+
+        Args:
+            *args: Positional arguments to pass to the run method
+            **kwargs: Keyword arguments to pass to the run method
+
+        Returns:
+            str: A unique workflow ID that can be used to reference this workflow instance
+        """
+
+        import asyncio
+
+        # Generate a unique ID for this workflow instance
+        if not self._workflow_id:
+            self._workflow_id = str(uuid.uuid4())
+
+        self.update_status("scheduled")
+
+        # Define the workflow execution function
+        async def _execute_workflow():
+            try:
+                # Run the workflow through the executor with pause/cancel monitoring
+                self.update_status("running")
+
+                # Create a coroutine for the run method
+                run_coro = self.run(*args, **kwargs)
+
+                # Create a task to monitor
+                run_task = asyncio.create_task(run_coro)
+
+                # Monitor and handle pause/cancel signals while the task runs
+                while not run_task.done():
+                    # Check for signals
+                    signal_bus = self.executor.signal_bus
+                    has_cancel = await signal_bus.has_signal(
+                        "cancel", workflow_id=self._workflow_id
+                    )
+                    has_pause = await signal_bus.has_signal(
+                        "pause", workflow_id=self._workflow_id
+                    )
+
+                    # Handle cancel signal (highest priority)
+                    if has_cancel:
+                        self._logger.info(
+                            f"Cancel signal received for workflow {self._workflow_id}"
+                        )
+                        self.update_status("cancelling")
+                        run_task.cancel()
+                        break
+
+                    # Handle pause signal
+                    if has_pause and self.state.status != "paused":
+                        self._logger.info(
+                            f"Pause signal received for workflow {self._workflow_id}"
+                        )
+                        self.update_status("paused")
+
+                        # Wait for resume signal
+                        await self.executor.wait_for_signal(
+                            "resume",
+                            workflow_id=self._workflow_id,
+                            signal_description="Waiting for resume signal",
+                        )
+
+                        # Resumed
+                        self._logger.info(
+                            f"Resume signal received for workflow {self._workflow_id}"
+                        )
+                        self.update_status("running")
+
+                    # Wait a bit or until the task completes
+                    try:
+                        done, _ = await asyncio.wait([run_task], timeout=0.5)
+                        if run_task in done:
+                            break
+                    except Exception as e:
+                        self._logger.error(f"Error waiting for task: {e}")
+
+                # Get the result (or exception)
+                result = await run_task
+                self.update_status("completed")
+                return result
+            except asyncio.CancelledError:
+                # Handle cancellation gracefully
+                self._logger.info(
+                    f"Workflow {self.name} (ID: {self._workflow_id}) was cancelled"
+                )
+                self.update_status("cancelled")
+                raise
+            except Exception as e:
+                # Log and propagate exceptions
+                self._logger.error(
+                    f"Error in workflow {self.name} (ID: {self._workflow_id}): {str(e)}"
+                )
+                self.update_status("error")
+                self.state.record_error(e)
+                raise
+            finally:
+                try:
+                    # Always attempt to clean up the workflow
+                    await self.cleanup()
+                except Exception as cleanup_error:
+                    # Log but don't fail if cleanup fails
+                    self._logger.error(
+                        f"Error cleaning up workflow {self.name} (ID: {self._workflow_id}): {str(cleanup_error)}"
+                    )
+
+                # Unregister from the workflow registry (if available)
+                if self.context and self.context.workflow_registry:
+                    self.context.workflow_registry.unregister(self._workflow_id)
+
+        # Create a task that doesn't block
+        # This approach supports both asyncio and future Temporal integration
+        # TODO: saqadri (MAC) - figure out how to do this for different executors.
+ # For Temporal, we would replace this with workflow.start() which also doesn't block + self._run_task = asyncio.create_task(_execute_workflow()) + + # Register this workflow with the registry + if self.context and self.context.workflow_registry: + self.context.workflow_registry.register( + self._workflow_id, self, self._run_task + ) + + return self._workflow_id + + async def pause(self) -> bool: + """ + Pause the workflow by sending a pause signal. + + Returns: + bool: True if the pause signal was sent successfully, False otherwise + """ + if not self._workflow_id: + self._logger.error("Cannot pause workflow with no ID") + return False + + try: + await self.executor.signal("pause", workflow_id=self._workflow_id) + self._logger.info(f"Pause signal sent to workflow {self._workflow_id}") + # Note: We don't update status to paused here - it will be updated + # in the run_async method when the workflow actually pauses + return True + except Exception as e: + self._logger.error( + f"Error sending pause signal to workflow {self._workflow_id}: {e}" + ) + return False + + async def resume( + self, signal_name: str | None = "resume", payload: str | None = None + ) -> bool: + """ + Resume a paused workflow, optionally providing input data. + + Args: + signal_name: The name of the signal to send (default: "resume") + payload: Optional data to provide to the workflow upon resuming + + Returns: + bool: True if the resume signal was sent successfully, False otherwise + """ + if not self._workflow_id: + self._logger.error("Cannot resume workflow with no ID") + return False + + try: + signal = Signal( + name=signal_name, workflow_id=self._workflow_id, payload=payload + ) + await self.executor.signal_bus.signal(signal) + self._logger.info( + f"{signal_name} signal sent to workflow {self._workflow_id}" + ) + # Note: We don't update status to running here - it will be updated + # when the workflow actually resumes in run_async + return True + except Exception as e: + self._logger.error( + f"Error sending resume signal to workflow {self._workflow_id}: {e}" + ) + return False + + async def cancel(self) -> bool: + """ + Cancel the workflow by sending a cancel signal and cancelling its task. + + Returns: + bool: True if the workflow was cancelled successfully, False otherwise + """ + if not self._workflow_id: + self._logger.error("Cannot cancel workflow with no ID") + return False + + try: + # First signal the workflow to cancel - this allows for graceful cancellation + # when the workflow checks for cancellation + self._logger.info(f"Sending cancel signal to workflow {self._workflow_id}") + await self.executor.signal("cancel", workflow_id=self._workflow_id) + + # Then forcibly cancel the task if we have one and it's still running + # This ensures cancellation even if the workflow doesn't check for signals + if self._run_task and not self._run_task.done(): + self._logger.info( + f"Forcibly cancelling task for workflow {self._workflow_id}" + ) + self._run_task.cancel() + + # Update the workflow state + self.update_status("cancelled") + + # Clean up the workflow + await self.cleanup() + + # Unregister from the workflow registry + if self.context and self.context.workflow_registry: + self.context.workflow_registry.unregister(self._workflow_id) + + return True + except Exception as e: + self._logger.error(f"Error cancelling workflow {self._workflow_id}: {e}") + return False + + def get_status(self) -> Dict[str, Any]: + """ + Get the current status of the workflow. 
+ + Returns: + Dict[str, Any]: A dictionary with workflow status information + """ + status = { + "id": self._workflow_id, + "name": self.name, + "running": self._run_task is not None and not self._run_task.done() + if self._run_task + else False, + "state": self.state.model_dump() + if hasattr(self.state, "model_dump") + else self.state.__dict__, + } + + # Add result/error information if the task is done + if self._run_task and self._run_task.done(): + try: + result = self._run_task.result() + + # Convert result to a useful format + if hasattr(result, "model_dump"): + result_data = result.model_dump() + elif hasattr(result, "__dict__"): + result_data = result.__dict__ + else: + result_data = str(result) + + status["result"] = result_data + status["completed"] = True + status["error"] = None + except Exception as e: + status["result"] = None + status["completed"] = False + status["error"] = str(e) + status["exception_type"] = type(e).__name__ + + return status + + def update_status(self, status: str) -> None: + """ + Update the workflow status. + + Args: + status: The new status to set + """ + self.state.status = status + self.state.updated_at = datetime.now(timezone.utc).timestamp() + + # Static registry methods have been moved to the WorkflowRegistry class + async def update_state(self, **kwargs): """Syntactic sugar to update workflow state.""" for key, value in kwargs.items(): @@ -175,102 +642,3 @@ async def __aenter__(self): async def __aexit__(self, exc_type, exc_val, exc_tb): """Support for async context manager pattern.""" await self.cleanup() - - -# ############################ -# # Example: DocumentWorkflow -# ############################ - - -# @workflow_defn # <-- This becomes @temporal_workflow.defn if in Temporal mode, else no-op -# class DocumentWorkflow(Workflow[List[Dict[str, Any]]]): -# """ -# Example workflow with persistent state. -# If run locally, `self.state` is ephemeral. -# If run in Temporal mode, `self.state` is replayed automatically. -# """ - -# @workflow_task( -# schedule_to_close_timeout=timedelta(minutes=10), -# retry_policy={"initial_interval": 1, "max_attempts": 3}, -# ) -# async def process_document(self, doc_id: str) -> Dict[str, Any]: -# """Activity that simulates document processing.""" -# await asyncio.sleep(1) -# # Optionally mutate workflow state -# self.state.metadata.setdefault("processed_docs", []).append(doc_id) -# return { -# "doc_id": doc_id, -# "status": "processed", -# "timestamp": datetime.utcnow().isoformat(), -# } - -# @workflow_run # <-- This becomes @temporal_workflow.run(...) if Temporal is used -# async def _run_impl( -# self, documents: List[str], batch_size: int = 2 -# ) -> List[Dict[str, Any]]: -# """Main workflow logic, which becomes the official 'run' in Temporal mode.""" -# self._logger.info("Workflow starting, state=%s", self.state) -# self.state.update_status("running") - -# all_results = [] -# for i in range(0, len(documents), batch_size): -# batch = documents[i : i + batch_size] -# tasks = [self.process_document(doc) for doc in batch] -# results = await self.executor.execute(*tasks) - -# for res in results: -# if isinstance(res.value, Exception): -# self._logger.error( -# f"Error processing document: {res.metadata.get('error')}" -# ) -# else: -# all_results.append(res.value) - -# self.state.update_status("completed") -# return all_results - - -# ######################## -# # 12. Example Local Usage -# ######################## - - -# async def run_example_local(): -# from . 
import AsyncIOExecutor, DocumentWorkflow # if in a package - -# executor = AsyncIOExecutor() -# wf = DocumentWorkflow(executor) - -# documents = ["doc1", "doc2", "doc3", "doc4"] -# result = await wf.run(documents, batch_size=2) - -# print("Local results:", result.value) -# print("Local workflow final state:", wf.state) -# # Notice `wf.state.metadata['processed_docs']` has the processed doc IDs. - - -# ######################## -# # Example Temporal Usage -# ######################## - - -# async def run_example_temporal(): -# from . import TemporalExecutor, DocumentWorkflow # if in a package - -# # 1) Create a TemporalExecutor (client side) -# executor = TemporalExecutor(task_queue="my_task_queue") -# await executor.ensure_client() - -# # 2) Start a worker in the same process (or do so in a separate process) -# asyncio.create_task(executor.start_worker()) -# await asyncio.sleep(2) # Wait for worker to be up - -# # 3) Now we can run the workflow by normal means if we like, -# # or rely on the Worker picking it up. Typically, you'd do: -# # handle = await executor._client.start_workflow(...) -# # but let's keep it simple and show conceptually -# # that 'DocumentWorkflow' is now recognized as a real Temporal workflow -# print( -# "Temporal environment is running. Use the Worker logs or CLI to start 'DocumentWorkflow'." -# ) diff --git a/src/mcp_agent/fast_app.py b/src/mcp_agent/fast_app.py index 368cf363f..214b48951 100644 --- a/src/mcp_agent/fast_app.py +++ b/src/mcp_agent/fast_app.py @@ -2,20 +2,17 @@ FastMCPApp - Extended MCPApp with declarative agent and workflow configuration. """ -from typing import Callable, List, Optional, Tuple +from typing import Callable, List from mcp_agent.app import MCPApp -from mcp_agent.agents.agent import Agent from mcp_agent.agents.agent_config import ( AgentConfig, AugmentedLLMConfig, - ParallelWorkflowConfig, - OrchestratorWorkflowConfig, - RouterWorkflowConfig, - EvaluatorOptimizerWorkflowConfig, - SwarmWorkflowConfig, + ParallelLLMConfig, + OrchestratorLLMConfig, + RouterConfig, + EvaluatorOptimizerConfig, ) -from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM class FastMCPApp(MCPApp): @@ -110,7 +107,7 @@ def decorator(config_fn): config = AgentConfig( name=name, instruction=instruction, - parallel_config=ParallelWorkflowConfig( + parallel_config=ParallelLLMConfig( fan_in_agent=fan_in, fan_out_agents=fan_out ), ) @@ -161,7 +158,7 @@ def decorator(config_fn): config = AgentConfig( name=name, instruction=instruction, - orchestrator_config=OrchestratorWorkflowConfig( + orchestrator_config=OrchestratorLLMConfig( available_agents=available_agents ), ) @@ -210,7 +207,7 @@ def decorator(config_fn): config = AgentConfig( name=name, instruction=instruction, - router_config=RouterWorkflowConfig( + router_config=RouterConfig( agent_names=agent_names, router_type=router_type ), ) @@ -259,7 +256,7 @@ def decorator(config_fn): config = AgentConfig( name=name, instruction=instruction, - evaluator_optimizer_config=EvaluatorOptimizerWorkflowConfig( + evaluator_optimizer_config=EvaluatorOptimizerConfig( evaluator_agent=evaluator, optimizer_agent=optimizer ), ) @@ -272,261 +269,3 @@ def decorator(config_fn): return config return decorator - - def swarm( - self, - name: str, - instruction: str, - agents: List[str], - llm_factory: Callable = None, - **kwargs, - ): - """ - Decorator to define a swarm workflow agent. 
- - Example: - @app.swarm("team", "A collaborative team of agents", - agents=["leader", "researcher", "writer"]) - def team_config(config): - config.swarm_config.context_variables = {"priority": "accuracy"} - return config - - Args: - name: The name of the workflow agent - instruction: The agent's instruction/system prompt - agents: List of agent names in the swarm - llm_factory: Optional LLM factory for the workflow - **kwargs: Additional parameters - - Returns: - Decorator function that registers the swarm workflow configuration - """ - - def decorator(config_fn): - config = AgentConfig( - name=name, - instruction=instruction, - swarm_config=SwarmWorkflowConfig(agents=agents), - ) - - if llm_factory: - config.llm_config = AugmentedLLMConfig(factory=llm_factory) - - config = config_fn(config) - self._agent_configs[name] = config - return config - - return decorator - - async def create_agent(self, name: str) -> Tuple[Agent, Optional[AugmentedLLM]]: - """ - Create an agent with its configured workflow. - - Args: - name: The name of the registered agent configuration - - Returns: - Tuple of (agent instance, augmented LLM or workflow instance) - - Raises: - ValueError: If no agent configuration with the given name exists - """ - if name not in self._agent_configs: - raise ValueError(f"No agent configuration named '{name}' is registered") - - config = self._agent_configs[name] - - # Create and initialize the basic agent - agent = config.create_agent(context=self._context) - await agent.initialize() - - # Handle different workflow types with type-safe configs - workflow_type = config.get_workflow_type() - - if workflow_type is None and config.llm_config is not None: - # Basic agent with simple LLM - llm_instance = await config.llm_config.create_llm() - llm = await agent.attach_llm(lambda: llm_instance) - return agent, llm - - elif workflow_type == "parallel": - # Create a Parallel workflow with type-safe config - parallel_config = config.parallel_config - from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM - - # Get referenced agents - fan_in_agent, _ = await self.create_agent(parallel_config.fan_in_agent) - fan_out_agents = [] - for agent_name in parallel_config.fan_out_agents: - fan_out_agent, _ = await self.create_agent(agent_name) - fan_out_agents.append(fan_out_agent) - - # Get LLM factory - llm_factory = config.llm_config.factory if config.llm_config else None - - # Create parallel workflow - parallel = ParallelLLM( - fan_in_agent=fan_in_agent, - fan_out_agents=fan_out_agents, - llm_factory=llm_factory, - concurrent=parallel_config.concurrent, - synchronize_fan_out_models=parallel_config.synchronize_fan_out_models, - **parallel_config.extra_params, - ) - - return agent, parallel - - elif workflow_type == "orchestrator": - # Create an Orchestrator workflow with type-safe config - orchestrator_config = config.orchestrator_config - from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator - - # Get referenced agents - available_agents = [] - for agent_name in orchestrator_config.available_agents: - available_agent, _ = await self.create_agent(agent_name) - available_agents.append(available_agent) - - # Get the LLM factory - llm_factory = config.llm_config.factory if config.llm_config else None - - # Optional planner agent - planner = None - if orchestrator_config.planner_agent: - planner, _ = await self.create_agent(orchestrator_config.planner_agent) - - # Create the orchestrator - orchestrator = Orchestrator( - llm_factory=llm_factory, - 
available_agents=available_agents, - planner=planner, - max_iterations=orchestrator_config.max_iterations, - **orchestrator_config.extra_params, - ) - - return agent, orchestrator - - elif workflow_type == "router": - # Create a Router workflow with type-safe config - router_config = config.router_config - - # Get referenced agents - agents = [] - for agent_name in router_config.agent_names: - agent_inst, _ = await self.create_agent(agent_name) - agents.append(agent_inst) - - # Determine which router implementation to use - if router_config.router_type == "llm": - from mcp_agent.workflows.router.router_llm import LLMRouter - - # Get LLM factory - llm_factory = config.llm_config.factory if config.llm_config else None - llm_instance = None - if llm_factory: - llm_instance = await config.llm_config.create_llm() - - # Create the router - router = LLMRouter( - llm=llm_instance, agents=agents, **router_config.extra_params - ) - - else: # embedding router - # Create the router (implementation depends on embedding model) - if router_config.embedding_model == "cohere": - from mcp_agent.workflows.router.router_embedding_cohere import ( - CohereEmbeddingRouter, - ) - - router = CohereEmbeddingRouter( - agents=agents, **router_config.extra_params - ) - else: - from mcp_agent.workflows.router.router_embedding_openai import ( - OpenAIEmbeddingRouter, - ) - - router = OpenAIEmbeddingRouter( - agents=agents, **router_config.extra_params - ) - - return agent, router - - elif workflow_type == "evaluator_optimizer": - # Create an Evaluator-Optimizer workflow with type-safe config - eo_config = config.evaluator_optimizer_config - from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import ( - EvaluatorOptimizerLLM, - QualityRating, - ) - - # Get referenced agents - evaluator_agent, _ = await self.create_agent(eo_config.evaluator_agent) - optimizer_agent, _ = await self.create_agent(eo_config.optimizer_agent) - - # Get LLM factory - llm_factory = config.llm_config.factory if config.llm_config else None - - # Parse min_rating string to enum - min_rating = QualityRating.GOOD # Default - try: - min_rating = QualityRating[eo_config.min_rating.upper()] - except (KeyError, AttributeError): - pass - - # Create the evaluator-optimizer - eo = EvaluatorOptimizerLLM( - evaluator=evaluator_agent, - optimizer=optimizer_agent, - llm_factory=llm_factory, - min_rating=min_rating, - max_iterations=eo_config.max_iterations, - **eo_config.extra_params, - ) - - return agent, eo - - elif workflow_type == "swarm": - # Create a Swarm workflow with type-safe config - swarm_config = config.swarm_config - - # Choose the swarm implementation based on LLM factory - llm_factory = config.llm_config.factory if config.llm_config else None - - if not llm_factory: - raise ValueError("A LLM factory is required for Swarm workflow") - - # Get the factory class name to determine which Swarm implementation to use - factory_class_name = llm_factory.__name__ - - if "Anthropic" in factory_class_name: - from mcp_agent.workflows.swarm.swarm_anthropic import AnthropicSwarm - - # Get the primary agent - primary_agent, _ = await self.create_agent(swarm_config.agents[0]) - - # Create the swarm - swarm = AnthropicSwarm( - agent=primary_agent, - context_variables=swarm_config.context_variables, - **swarm_config.extra_params, - ) - else: - # Default to OpenAI swarm - from mcp_agent.workflows.swarm.swarm_openai import OpenAISwarm - - # Get the primary agent - primary_agent, _ = await self.create_agent(swarm_config.agents[0]) - - # Create the swarm - 
swarm = OpenAISwarm( - agent=primary_agent, - context_variables=swarm_config.context_variables, - **swarm_config.extra_params, - ) - - return agent, swarm - - else: - # No workflow or LLM config, just return the basic agent - return agent, None From 4c45e80c4e75cdd22c18182735100d2feeaede9a Mon Sep 17 00:00:00 2001 From: Sarmad Qadri Date: Thu, 3 Apr 2025 12:24:32 -0400 Subject: [PATCH 4/9] Updates to workflow --- src/mcp_agent/executor/workflow.py | 101 ++++++----------------------- 1 file changed, 20 insertions(+), 81 deletions(-) diff --git a/src/mcp_agent/executor/workflow.py b/src/mcp_agent/executor/workflow.py index 592716c0a..c332f6304 100644 --- a/src/mcp_agent/executor/workflow.py +++ b/src/mcp_agent/executor/workflow.py @@ -30,6 +30,10 @@ class WorkflowRegistry: """ Registry for tracking workflow instances. Provides a central place to register, look up, and manage workflow instances. + + TODO: saqadri (MAC) - How does this work with proper workflow orchestration? + For example, when using Temporal, this registry should interface with the + workflow service to manage workflow instances. """ def __init__(self): @@ -77,28 +81,7 @@ def get_workflow(self, workflow_id: str) -> Optional["Workflow"]: """ return self._workflows.get(workflow_id) - def get_task(self, workflow_id: str) -> Optional["asyncio.Task"]: - """ - Get the task for a workflow instance by ID. - - Args: - workflow_id: The unique ID for the workflow - - Returns: - The asyncio task, or None if not found - """ - return self._tasks.get(workflow_id) - async def pause_workflow(self, workflow_id: str) -> bool: - """ - Pause a workflow by ID. - - Args: - workflow_id: The unique ID of the workflow to pause - - Returns: - True if the workflow was paused successfully, False otherwise - """ workflow = self.get_workflow(workflow_id) if not workflow: self._logger.error( @@ -109,18 +92,11 @@ async def pause_workflow(self, workflow_id: str) -> bool: return await workflow.pause() async def resume_workflow( - self, workflow_id: str, input_data: Optional[str] = None + self, + workflow_id: str, + signal_name: str | None = "resume", + payload: str | None = None, ) -> bool: - """ - Resume a workflow by ID. - - Args: - workflow_id: The unique ID of the workflow to resume - input_data: Optional data to provide to the workflow - - Returns: - True if the workflow was resumed successfully, False otherwise - """ workflow = self.get_workflow(workflow_id) if not workflow: self._logger.error( @@ -128,18 +104,9 @@ async def resume_workflow( ) return False - return await workflow.resume(input_data) + return await workflow.resume(signal_name, payload) async def cancel_workflow(self, workflow_id: str) -> bool: - """ - Cancel a workflow by ID. - - Args: - workflow_id: The unique ID of the workflow to cancel - - Returns: - True if the workflow was cancelled successfully, False otherwise - """ workflow = self.get_workflow(workflow_id) if not workflow: self._logger.error( @@ -187,6 +154,7 @@ class WorkflowState(BaseModel): This can hold fields that should persist across tasks. 
""" + # TODO: saqadri - (MAC) - This should be a proper status enum status: str = "initialized" metadata: Dict[str, Any] = Field(default_factory=dict) updated_at: float | None = None @@ -198,7 +166,7 @@ def record_error(self, error: Exception) -> None: self.error = { "type": type(error).__name__, "message": str(error), - "timestamp": datetime.utcnow().timestamp(), + "timestamp": datetime.now(timezone.utc).timestamp(), } @@ -218,9 +186,7 @@ class Workflow(ABC, Generic[T], ContextDependent): Typically, workflows are registered with an MCPApp and can be exposed as MCP tools via app_server.py. Some key notes: - - To enable the executor engine to recognize and orchestrate the workflow, - - the class MUST be decorated with @app.workflow. - + - The class MUST be decorated with @app.workflow. - Persistent state: Provides a simple `state` object for storing data across tasks. - Lifecycle management: Provides run_async, pause, resume, cancel, and get_status methods. """ @@ -236,7 +202,6 @@ def __init__( ContextDependent.__init__(self, context=context) self.name = name or self.__class__.__name__ - self.init_kwargs = kwargs self._logger = get_logger(f"workflow.{self.name}") self._initialized = False self._workflow_id = None @@ -307,6 +272,10 @@ async def run_async(self, *args: Any, **kwargs: Any) -> str: and returns immediately with a workflow ID that can be used to check status, pause, resume, or cancel. + TODO: saqadri - (MAC) - This needs to be updated to use + the executor for proper workflow orchestration. For example, asyncio vs. Temporal. + Current implementation only works with asyncio. + Args: *args: Positional arguments to pass to the run method **kwargs: Keyword arguments to pass to the run method @@ -348,7 +317,7 @@ async def _execute_workflow(): ) # Handle cancel signal (highest priority) - if has_cancel: + if has_cancel and self.state.status != "cancelling": self._logger.info( f"Cancel signal received for workflow {self._workflow_id}" ) @@ -417,8 +386,6 @@ async def _execute_workflow(): if self.context and self.context.workflow_registry: self.context.workflow_registry.unregister(self._workflow_id) - # Create a task that doesn't block - # This approach supports both asyncio and future Temporal integration # TODO: saqadri (MAC) - figure out how to do this for different executors. 
# For Temporal, we would replace this with workflow.start() which also doesn't block self._run_task = asyncio.create_task(_execute_workflow()) @@ -504,25 +471,6 @@ async def cancel(self) -> bool: # when the workflow checks for cancellation self._logger.info(f"Sending cancel signal to workflow {self._workflow_id}") await self.executor.signal("cancel", workflow_id=self._workflow_id) - - # Then forcibly cancel the task if we have one and it's still running - # This ensures cancellation even if the workflow doesn't check for signals - if self._run_task and not self._run_task.done(): - self._logger.info( - f"Forcibly cancelling task for workflow {self._workflow_id}" - ) - self._run_task.cancel() - - # Update the workflow state - self.update_status("cancelled") - - # Clean up the workflow - await self.cleanup() - - # Unregister from the workflow registry - if self.context and self.context.workflow_registry: - self.context.workflow_registry.unregister(self._workflow_id) - return True except Exception as e: self._logger.error(f"Error cancelling workflow {self._workflow_id}: {e}") @@ -589,16 +537,7 @@ async def update_state(self, **kwargs): self.state[key] = value setattr(self.state, key, value) - self.state.updated_at = datetime.utcnow().timestamp() - - async def wait_for_input(self, description: str = "Provide input") -> str: - """ - Convenience method for human input. Uses `human_input` signal - so we can unify local (console input) and Temporal signals. - """ - return await self.executor.wait_for_signal( - "human_input", description=description - ) + self.state.updated_at = datetime.now(timezone.utc).timestamp() async def initialize(self): """ @@ -614,7 +553,7 @@ async def initialize(self): self.state.status = "initializing" self._logger.debug(f"Initializing workflow {self.name}") self._initialized = True - self.state.updated_at = datetime.utcnow().timestamp() + self.state.updated_at = datetime.now(timezone.utc).timestamp() async def cleanup(self): """ @@ -632,7 +571,7 @@ async def cleanup(self): self._logger.debug(f"Cleaning up workflow {self.name}") self._initialized = False self.state.status = "cleaned_up" - self.state.updated_at = datetime.utcnow().timestamp() + self.state.updated_at = datetime.now(timezone.utc).timestamp() async def __aenter__(self): """Support for async context manager pattern.""" From cd6720f967a3719b32fc9a13672ed7536cae6622 Mon Sep 17 00:00:00 2001 From: Sarmad Qadri Date: Thu, 3 Apr 2025 13:22:16 -0400 Subject: [PATCH 5/9] Simplify workflows to remove pause functionality. --- src/mcp_agent/app_server.py | 33 +------ src/mcp_agent/executor/workflow.py | 134 +++++++++-------------------- 2 files changed, 45 insertions(+), 122 deletions(-) diff --git a/src/mcp_agent/app_server.py b/src/mcp_agent/app_server.py index 0a4ce17c6..334d9e228 100644 --- a/src/mcp_agent/app_server.py +++ b/src/mcp_agent/app_server.py @@ -357,7 +357,7 @@ def list_workflows(ctx: MCPContext) -> Dict[str, Dict[str, Any]]: result[workflow_name] = { "name": workflow_name, "description": workflow_cls.__doc__ or run_fn_tool.description, - "capabilities": ["run", "pause", "resume", "cancel", "get_status"], + "capabilities": ["run", "resume", "cancel", "get_status"], "tool_endpoints": endpoints, "run_parameters": run_fn_tool.parameters, } @@ -398,7 +398,7 @@ async def run_workflow( Returns: The workflow ID of the started workflow run, which can be passed to - workflows/get_status, workflows/pause, workflows/resume, and workflows/cancel. + workflows/get_status, workflows/resume, and workflows/cancel. 
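+
+            Illustrative client-side call shape (workflow name is hypothetical;
+            see examples/workflow_mcp_server/client.py for a full walkthrough):
+
+                await server.call_tool(
+                    "workflows/run",
+                    {"workflow_name": "MyWorkflow", "run_parameters": {...}},
+                )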
""" return await _workflow_run(ctx, workflow_name, run_parameters) @@ -419,35 +419,6 @@ def get_workflow_status(ctx: MCPContext, workflow_id: str) -> Dict[str, Any]: """ return _workflow_status(ctx, workflow_id) - @mcp.tool(name="workflows/pause") - async def pause_workflow( - ctx: MCPContext, - workflow_id: str, - ) -> bool: - """ - Pause a running workflow. - - Args: - workflow_id: The ID of the workflow to pause, - received from workflows/run or workflows/runs/list. - - Returns: - True if the workflow was paused, False otherwise. - """ - server_context: ServerContext = ctx.request_context.lifespan_context - workflow_registry = server_context.context.workflow_registry - - if not workflow_registry: - raise ToolError("Workflow registry not found for MCPApp Server.") - - # Get the workflow instance from the registry - workflow = workflow_registry.get_workflow(workflow_id) - if not workflow: - raise ValueError(f"Workflow with ID '{workflow_id}' not found.") - - # Pause the workflow directly - return await workflow.pause() - @mcp.tool(name="workflows/resume") async def resume_workflow( ctx: MCPContext, diff --git a/src/mcp_agent/executor/workflow.py b/src/mcp_agent/executor/workflow.py index c332f6304..ef75d9fbb 100644 --- a/src/mcp_agent/executor/workflow.py +++ b/src/mcp_agent/executor/workflow.py @@ -81,16 +81,6 @@ def get_workflow(self, workflow_id: str) -> Optional["Workflow"]: """ return self._workflows.get(workflow_id) - async def pause_workflow(self, workflow_id: str) -> bool: - workflow = self.get_workflow(workflow_id) - if not workflow: - self._logger.error( - f"Cannot pause workflow {workflow_id}: workflow not found" - ) - return False - - return await workflow.pause() - async def resume_workflow( self, workflow_id: str, @@ -264,13 +254,29 @@ async def run(self, *args: Any, **kwargs: Any) -> "WorkflowResult[T]": WorkflowResult containing the output of the workflow """ + async def _cancel_task(self): + """ + Wait for a cancel signal and cancel the workflow task. + """ + signal = await self.executor.wait_for_signal( + "cancel", + workflow_id=self._workflow_id, + signal_description="Waiting for cancel signal", + ) + + self._logger.info(f"Cancel signal received for workflow {self._workflow_id}") + self.update_status("cancelling") + + # The run task will be cancelled in the run_async method + return signal + async def run_async(self, *args: Any, **kwargs: Any) -> str: """ Run the workflow asynchronously and return a workflow ID. This creates an async task that will be executed through the executor and returns immediately with a workflow ID that can be used to - check status, pause, resume, or cancel. + check status, resume, or cancel. TODO: saqadri - (MAC) - This needs to be updated to use the executor for proper workflow orchestration. For example, asyncio vs. Temporal. 
@@ -299,64 +305,34 @@ async def _execute_workflow(): # Run the workflow through the executor with pause/cancel monitoring self.update_status("running") - # Create a coroutine for the run method - run_coro = self.run(*args, **kwargs) + run_task = asyncio.create_task(self.run(*args, **kwargs)) + cancel_task = asyncio.create_task(self._cancel_task()) - # Create a task to monitor - run_task = asyncio.create_task(run_coro) - - # Monitor and handle pause/cancel signals while the task runs - while not run_task.done(): - # Check for signals - signal_bus = self.executor.signal_bus - has_cancel = await signal_bus.has_signal( - "cancel", workflow_id=self._workflow_id - ) - has_pause = await signal_bus.has_signal( - "pause", workflow_id=self._workflow_id + # Simply wait for either the run task or cancel task to complete + try: + # Wait for either task to complete, whichever happens first + done, _ = await asyncio.wait( + [run_task, cancel_task], return_when=asyncio.FIRST_COMPLETED ) - # Handle cancel signal (highest priority) - if has_cancel and self.state.status != "cancelling": - self._logger.info( - f"Cancel signal received for workflow {self._workflow_id}" - ) - self.update_status("cancelling") + # Check which task completed + if cancel_task in done: + # Cancel signal received, cancel the run task run_task.cancel() - break - - # Handle pause signal - if has_pause and self.state.status != "paused": - self._logger.info( - f"Pause signal received for workflow {self._workflow_id}" - ) - self.update_status("paused") - - # Wait for resume signal - await self.executor.wait_for_signal( - "resume", - workflow_id=self._workflow_id, - signal_description="Waiting for resume signal", - ) - - # Resumed - self._logger.info( - f"Resume signal received for workflow {self._workflow_id}" - ) - self.update_status("running") - - # Wait a bit or until the task completes - try: - done, _ = await asyncio.wait([run_task], timeout=0.5) - if run_task in done: - break - except Exception as e: - self._logger.error(f"Error waiting for task: {e}") - - # Get the result (or exception) - result = await run_task - self.update_status("completed") - return result + self.update_status("cancelled") + raise CancelledError("Workflow was cancelled") + elif run_task in done: + # Run task completed, cancel the cancel task + cancel_task.cancel() + # Get the result (or propagate any exception) + result = await run_task + self.update_status("completed") + return result + + except Exception as e: + self._logger.error(f"Error waiting for tasks: {e}") + raise + except CancelledError: # Handle cancellation gracefully self._logger.info( @@ -398,34 +374,11 @@ async def _execute_workflow(): return self._workflow_id - async def pause(self) -> bool: - """ - Pause the workflow by sending a pause signal. 
-
-        Returns:
-            bool: True if the pause signal was sent successfully, False otherwise
-        """
-        if not self._workflow_id:
-            self._logger.error("Cannot pause workflow with no ID")
-            return False
-
-        try:
-            await self.executor.signal("pause", workflow_id=self._workflow_id)
-            self._logger.info(f"Pause signal sent to workflow {self._workflow_id}")
-            # Note: We don't update status to paused here - it will be updated
-            # in the run_async method when the workflow actually pauses
-            return True
-        except Exception as e:
-            self._logger.error(
-                f"Error sending pause signal to workflow {self._workflow_id}: {e}"
-            )
-            return False
-
     async def resume(
         self, signal_name: str | None = "resume", payload: str | None = None
     ) -> bool:
         """
-        Resume a paused workflow, optionally providing input data.
+        Send a resume signal to the workflow.

         Args:
             signal_name: The name of the signal to send (default: "resume")
             payload: Optional data to provide to the workflow upon resuming
@@ -446,8 +399,7 @@ async def resume(
             self._logger.info(
                 f"{signal_name} signal sent to workflow {self._workflow_id}"
             )
-            # Note: We don't update status to running here - it will be updated
-            # when the workflow actually resumes in run_async
+            self.update_status("running")
             return True
         except Exception as e:
             self._logger.error(

From 0df1deadef66a141e769c3c60486f6ba7ed6c958 Mon Sep 17 00:00:00 2001
From: Sarmad Qadri
Date: Thu, 3 Apr 2025 16:00:31 -0400
Subject: [PATCH 6/9] The app server kinda sorta works!

---
 .../workflow_mcp_server/basic_agent_server.py | 132 +++++++++++++++
 examples/workflow_mcp_server/client.py        | 152 +++---------------
 examples/workflow_mcp_server/server.py        |   6 +-
 src/mcp_agent/agents/agent_config.py          |  25 +--
 src/mcp_agent/app.py                          |  17 +-
 src/mcp_agent/app_server.py                   |  49 ++++--
 src/mcp_agent/executor/workflow.py            |   3 +-
 7 files changed, 202 insertions(+), 182 deletions(-)
 create mode 100644 examples/workflow_mcp_server/basic_agent_server.py

diff --git a/examples/workflow_mcp_server/basic_agent_server.py b/examples/workflow_mcp_server/basic_agent_server.py
new file mode 100644
index 000000000..c2dd3eb8c
--- /dev/null
+++ b/examples/workflow_mcp_server/basic_agent_server.py
@@ -0,0 +1,132 @@
+"""
+Workflow MCP Server Example
+
+This example demonstrates how to:
+1. Define a workflow by subclassing Workflow and registering it with @app.workflow
+2. Expose that workflow as an MCP tool via create_mcp_server_for_app
+3. Serve the resulting MCP server over stdio to MCP clients
+"""
+
+import asyncio
+import os
+import logging
+
+from mcp_agent.app import MCPApp
+from mcp_agent.app_server import create_mcp_server_for_app
+from mcp_agent.agents.agent import Agent
+from mcp_agent.workflows.llm.augmented_llm import RequestParams
+from mcp_agent.workflows.llm.llm_selector import ModelPreferences
+from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
+from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
+from mcp_agent.executor.workflow import Workflow, WorkflowResult
+
+# Initialize logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Create a single MCPApp instance to host the workflow server
+app = MCPApp(name="basic_agent_server", description="Basic agent server example")
+
+
+@app.workflow
+class BasicAgentWorkflow(Workflow[str]):
+    """
+    A basic workflow that demonstrates how to create a simple agent.
+    This workflow is used as an example of a basic agent configuration.
+    """
+
+    async def run(self, input: str) -> WorkflowResult[str]:
+        """
+        Run the basic agent workflow.
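+
+        Exposed by the MCP app server as the `workflows/BasicAgentWorkflow/run`
+        tool; client.py in this example shows an end-to-end invocation.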
+ + Args: + input: The input string to prompt the agent. + + Returns: + WorkflowResult containing the processed data. + """ + + logger = app.logger + context = app.context + + logger.info("Current config:", data=context.config.model_dump()) + logger.info("Received input:", data=input) + + # Add the current directory to the filesystem server's args + context.config.mcp.servers["filesystem"].args.extend([os.getcwd()]) + + finder_agent = Agent( + name="finder", + instruction="""You are an agent with access to the filesystem, + as well as the ability to fetch URLs. Your job is to identify + the closest match to a user's request, make the appropriate tool calls, + and return the URI and CONTENTS of the closest match.""", + server_names=["fetch", "filesystem"], + ) + + async with finder_agent: + logger.info("finder: Connected to server, calling list_tools...") + result = await finder_agent.list_tools() + logger.info("Tools available:", data=result.model_dump()) + + llm = await finder_agent.attach_llm(OpenAIAugmentedLLM) + result = await llm.generate_str( + message="Print the contents of mcp_agent.config.yaml verbatim", + ) + logger.info(f"mcp_agent.config.yaml contents: {result}") + + # Let's switch the same agent to a different LLM + llm = await finder_agent.attach_llm(AnthropicAugmentedLLM) + + result = await llm.generate_str( + message="Print the first 2 paragraphs of https://modelcontextprotocol.io/introduction", + ) + logger.info(f"First 2 paragraphs of Model Context Protocol docs: {result}") + + # Multi-turn conversations + result = await llm.generate_str( + message="Summarize those paragraphs in a 128 character tweet", + # You can configure advanced options by setting the request_params object + request_params=RequestParams( + # See https://modelcontextprotocol.io/docs/concepts/sampling#model-preferences for more details + modelPreferences=ModelPreferences( + costPriority=0.1, + speedPriority=0.2, + intelligencePriority=0.7, + ), + # You can also set the model directly using the 'model' field + # Generally request_params type aligns with the Sampling API type in MCP + ), + ) + logger.info(f"Paragraph as a tweet: {result}") + return WorkflowResult(value=result) + + +async def main(): + async with app.run() as agent_app: + # Add the current directory to the filesystem server's args if needed + context = agent_app.context + if "filesystem" in context.config.mcp.servers: + context.config.mcp.servers["filesystem"].args.extend([os.getcwd()]) + + # Log registered workflows and agent configurations + logger.info(f"Creating MCP server for {agent_app.name}") + + logger.info("Registered workflows:") + for workflow_id in agent_app.workflows: + logger.info(f" - {workflow_id}") + + logger.info("Registered agent configurations:") + for name, config in agent_app.agent_configs.items(): + workflow_type = config.get_agent_type() or "basic" + logger.info(f" - {name} ({workflow_type})") + + # Create the MCP server that exposes both workflows and agent configurations + mcp_server = create_mcp_server_for_app(agent_app) + + # Run the server + await mcp_server.run_stdio_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/workflow_mcp_server/client.py b/examples/workflow_mcp_server/client.py index e7bf598e0..381dd4d76 100644 --- a/examples/workflow_mcp_server/client.py +++ b/examples/workflow_mcp_server/client.py @@ -2,6 +2,7 @@ import time from mcp_agent.app import MCPApp +from mcp_agent.config import MCPServerSettings from mcp_agent.mcp.gen_client import gen_client @@ -16,19 
+17,20 @@ async def main(): logger.info("Connecting to workflow server...") # Override the server configuration to point to our local script - context.server_registry.add_server( - "workflow_server", + context.server_registry.registry["basic_agent_server"] = MCPServerSettings( + name="basic_agent_server", + description="Local workflow server running the basic agent example", command="uv", - args=["run", "server.py"], - description="Local workflow server exposing data processing and summarization workflows", + args=["run", "basic_agent_server.py"], ) # Connect to the workflow server - async with gen_client("workflow_server", context.server_registry) as server: + async with gen_client("basic_agent_server", context.server_registry) as server: # List available tools - tools = await server.list_tools() + tools_result = await server.list_tools() logger.info( - "Available tools:", data={"tools": [tool.name for tool in tools]} + "Available tools:", + data={"tools": [tool.name for tool in tools_result.tools]}, ) # List available workflows @@ -37,137 +39,21 @@ async def main(): workflows = {} if workflows_response.content and len(workflows_response.content) > 0: - workflows = workflows_response.content[0].text + workflows_text = workflows_response.content[0].text + try: + # Try to parse the response as JSON if it's a string + import json + + workflows = json.loads(workflows_text) + except (json.JSONDecodeError, TypeError): + # If it's not valid JSON, just use the text + logger.info("Received workflows text:", data=workflows_text) + workflows = {"workflows_text": workflows_text} logger.info( "Available workflows:", data={"workflows": list(workflows.keys())} ) - # Run summarization workflow - logger.info("Running the SummarizationWorkflowRegistered workflow...") - sample_text = """ - The Model Context Protocol (MCP) is a standardized API for AI assistants to communicate with tools - and services in their context. This protocol standardizes the way assistants access data through - tool definitions, tools calls, and file/URL content. It is designed to make it easy for developers - to give AI assistants access to data and tools, and for AI assistants to understand how to interact - with those tools. The protocol defines a consistent pattern for tool discovery, invocation, - and response handling that works across different AI assistant implementations. 
- """ - - # Start the summarization workflow - workflow_run_response = await server.call_tool( - "workflows/SummarizationWorkflowRegistered/run", - { - "args": { - "content": sample_text, - "max_length": 200, - "style": "technical", - "key_points": 3, - } - }, - ) - - if workflow_run_response.content and len(workflow_run_response.content) > 0: - workflow_result = workflow_run_response.content[0].text - workflow_id = workflow_result.get("workflow_id") - logger.info( - "Summarization workflow started", data={"workflow_id": workflow_id} - ) - - # Wait for workflow to complete - logger.info("Waiting for workflow to complete...") - await asyncio.sleep(5) - - # Check workflow status - status_response = await server.call_tool( - "workflows/SummarizationWorkflowRegistered/get_status", - {"workflow_instance_id": workflow_id}, - ) - - if status_response.content and len(status_response.content) > 0: - status = status_response.content[0].text - - if status.get("completed", False) and "result" in status: - logger.info("Workflow completed!") - result = status.get("result", {}) - - if "value" in result: - summary = result["value"].get( - "summary", "No summary available" - ) - key_points = result["value"].get( - "key_points", "No key points available" - ) - - logger.info("Summary:", data={"summary": summary}) - logger.info("Key Points:", data={"key_points": key_points}) - else: - logger.info("Workflow status:", data={"status": status}) - - # Run data processor workflow - logger.info("Running the DataProcessorWorkflowRegistered workflow...") - - # Use a URL that the server's fetch tool can access - data_workflow_response = await server.call_tool( - "workflows/DataProcessorWorkflowRegistered/run", - { - "args": { - "source": "https://modelcontextprotocol.io/introduction", - "analysis_prompt": "Analyze what MCP is and its key benefits", - "output_format": "markdown", - } - }, - ) - - if ( - data_workflow_response.content - and len(data_workflow_response.content) > 0 - ): - workflow_result = data_workflow_response.content[0].text - workflow_id = workflow_result.get("workflow_id") - logger.info( - "Data processor workflow started", data={"workflow_id": workflow_id} - ) - - # Wait for workflow to complete (this might take longer) - logger.info("Waiting for data processor workflow to complete...") - max_wait = 30 # Maximum wait time in seconds - wait_interval = 5 # Check every 5 seconds - - for _ in range(max_wait // wait_interval): - await asyncio.sleep(wait_interval) - - # Check workflow status - status_response = await server.call_tool( - "workflows/DataProcessorWorkflowRegistered/get_status", - {"workflow_instance_id": workflow_id}, - ) - - if status_response.content and len(status_response.content) > 0: - status = status_response.content[0].text - - if status.get("completed", False): - result = status.get("result", {}) - logger.info("Data processor workflow completed!") - - if "value" in result: - logger.info( - "Processed Data:", - data={"data": result["value"][:500] + "..."}, - ) - break - - # If failed, break early - if status.get("error"): - logger.error( - "Workflow failed:", data={"error": status.get("error")} - ) - break - else: - logger.warning( - "Workflow took too long to complete, giving up after waiting" - ) - if __name__ == "__main__": start = time.time() diff --git a/examples/workflow_mcp_server/server.py b/examples/workflow_mcp_server/server.py index 475a04a24..d40e6bcbc 100644 --- a/examples/workflow_mcp_server/server.py +++ b/examples/workflow_mcp_server/server.py @@ -168,9 +168,9 @@ async 
def run( "output_format": format_instruction, "workflow_completed": True, }, - start_time=self.state.metadata.get( - "start_time" - ), # TODO: saqadri (MAC) - fix + start_time=self.state.metadata["start_time"] + if "start_time" in self.state.metadata + else None, # Handle missing "start_time" gracefully end_time=self.state.updated_at, ) diff --git a/src/mcp_agent/agents/agent_config.py b/src/mcp_agent/agents/agent_config.py index cecfececa..4f1b1bdc0 100644 --- a/src/mcp_agent/agents/agent_config.py +++ b/src/mcp_agent/agents/agent_config.py @@ -17,7 +17,6 @@ from pydantic import BaseModel, Field, ConfigDict from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM, RequestParams -from mcp_agent.human_input.types import HumanInputCallback if TYPE_CHECKING: from mcp_agent.agents.agent import Agent @@ -46,7 +45,7 @@ class AugmentedLLMConfig(BaseModel, Generic[LLM]): # Request parameters used in generate calls default_request_params: Optional[RequestParams] = None - model_config = ConfigDict(extra=True, arbitrary_types_allowed=True) + model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True) async def create_llm(self) -> LLM: """ @@ -66,25 +65,6 @@ async def create_llm(self) -> LLM: return self.factory(**params) -class BasicAgentConfig(BaseModel): - """ - Configuration for a basic agent with an LLM. - This contains all the parameters needed to create a standard Agent - without any complex workflow pattern. - """ - - name: str - instruction: Union[str, Callable[[Dict], str]] = "You are a helpful agent." - server_names: List[str] = Field(default_factory=list) - functions: List[Callable] = Field(default_factory=list) - connection_persistence: bool = True - human_input_callback: Optional[HumanInputCallback] = None - llm_config: Optional[AugmentedLLMConfig] = None - extra_kwargs: Dict[str, Any] = Field(default_factory=dict) - - model_config = ConfigDict(arbitrary_types_allowed=True) - - class ParallelLLMConfig(BaseModel): """Type-safe configuration for ParallelLLM workflow pattern.""" @@ -134,7 +114,8 @@ class AgentConfig(BaseModel): server_names: List[str] = Field(default_factory=list) functions: List[Callable] = Field(default_factory=list) connection_persistence: bool = True - human_input_callback: Optional[HumanInputCallback] = None + # TODO: saqadri (MAC) - Add a way to specify a custom human input callback + # human_input_callback: Optional[HumanInputCallback] = None # LLM config for either basic agent or workflow LLM factory llm_config: Optional[AugmentedLLMConfig] = None diff --git a/src/mcp_agent/app.py b/src/mcp_agent/app.py index f49bac84c..0aafa7668 100644 --- a/src/mcp_agent/app.py +++ b/src/mcp_agent/app.py @@ -224,14 +224,15 @@ def workflow( If Temporal is available & we use a TemporalExecutor, this decorator will wrap with temporal_workflow.defn. 
""" - decorator_registry = self.context.decorator_registry - execution_engine = self.engine - workflow_defn_decorator = decorator_registry.get_workflow_defn_decorator( - execution_engine - ) - - if workflow_defn_decorator: - return workflow_defn_decorator(cls, *args, **kwargs) + # TODO: saqadri (MAC) - fix this for Temporal support + # decorator_registry = self.context.decorator_registry + # execution_engine = self.engine + # workflow_defn_decorator = decorator_registry.get_workflow_defn_decorator( + # execution_engine + # ) + + # if workflow_defn_decorator: + # return workflow_defn_decorator(cls, *args, **kwargs) cls._app = self self._workflows[workflow_id or cls.__name__] = cls diff --git a/src/mcp_agent/app_server.py b/src/mcp_agent/app_server.py index 334d9e228..b0a8c2d7a 100644 --- a/src/mcp_agent/app_server.py +++ b/src/mcp_agent/app_server.py @@ -68,8 +68,8 @@ def __init__(self, mcp: FastMCP, context: "Context", **kwargs): def register_workflow(self, workflow_name: str, workflow_cls: Type[Workflow]): """Register a workflow class.""" - if workflow_name not in self.context.app.workflows: - self.context.app.workflows[workflow_name] = workflow_cls + if workflow_name not in self.context.workflows: + self.workflows[workflow_name] = workflow_cls # Create tools for this workflow create_workflow_specific_tools(self.mcp, workflow_name, workflow_cls) @@ -111,7 +111,7 @@ async def get_or_create_agent(self, name: str) -> Agent: return self.active_agents[name] # Check if there's a configuration for this agent - agent_config = self.context.app._agent_configs.get(name) + agent_config = self.agent_configs.get(name) if agent_config: try: agent = await create_agent(name=agent_config.name, context=self.context) @@ -126,6 +126,26 @@ async def get_or_create_agent(self, name: str) -> Agent: f"Agent not found: {name}. No active agent or configuration with this name exists." ) + @property + def app(self) -> MCPApp: + """Get the MCPApp instance associated with this server context.""" + return self.context.app + + @property + def workflows(self) -> Dict[str, Type[Workflow]]: + """Get the workflows registered in this server context.""" + return self.app.workflows + + @property + def workflow_registry(self) -> WorkflowRegistry: + """Get the workflow registry for this server context.""" + return self.context.workflow_registry + + @property + def agent_configs(self) -> Dict[str, AgentConfig]: + """Get the agent configurations for this server context.""" + return self.app._agent_configs + def create_mcp_server_for_app(app: MCPApp) -> FastMCP: """ @@ -344,7 +364,7 @@ def list_workflows(ctx: MCPContext) -> Dict[str, Dict[str, Any]]: server_context: ServerContext = ctx.request_context.lifespan_context result = {} - for workflow_name, workflow_cls in server_context.context.app.workflows.items(): + for workflow_name, workflow_cls in server_context.workflows.items(): # Get workflow documentation run_fn_tool = FastTool.from_function(workflow_cls.run) @@ -379,7 +399,7 @@ def list_workflow_runs(ctx: MCPContext) -> List[Dict[str, Any]]: server_context: ServerContext = ctx.request_context.lifespan_context # Get all workflow statuses from the registry - workflow_statuses = server_context.context.workflow_registry.list_workflows() + workflow_statuses = server_context.workflow_registry.list_workflows() return workflow_statuses @mcp.tool(name="workflows/run") @@ -443,7 +463,7 @@ async def resume_workflow( True if the workflow was resumed, False otherwise. 
""" server_context: ServerContext = ctx.request_context.lifespan_context - workflow_registry = server_context.context.workflow_registry + workflow_registry = server_context.workflow_registry if not workflow_registry: raise ToolError("Workflow registry not found for MCPApp Server.") @@ -468,7 +488,7 @@ async def cancel_workflow(ctx: MCPContext, workflow_id: str) -> bool: True if the workflow was cancelled, False otherwise. """ server_context: ServerContext = ctx.request_context.lifespan_context - workflow_registry = server_context.context.workflow_registry + workflow_registry = server_context.workflow_registry # Get the workflow instance from the registry workflow = workflow_registry.get_workflow(workflow_id) @@ -498,7 +518,7 @@ def create_agent_tools(mcp: FastMCP, server_context: ServerContext): for _, agent in server_context.active_agents.items(): create_agent_specific_tools(mcp, server_context, agent) - for _, agent_config in server_context.app._agent_configs.items(): + for _, agent_config in server_context.agent_configs.items(): agent = server_context.get_or_create_agent(agent_config.name) @@ -655,7 +675,7 @@ def create_workflow_tools(mcp: FastMCP, server_context: ServerContext): logger.warning("Server config not available for creating workflow tools") return - for workflow_name, workflow_cls in server_context.context.app.workflows.items(): + for workflow_name, workflow_cls in server_context.workflows.items(): create_workflow_specific_tools(mcp, workflow_name, workflow_cls) @@ -874,18 +894,19 @@ async def _workflow_run( run_parameters: Dict[str, Any] | None = None, ) -> str: server_context: ServerContext = ctx.request_context.lifespan_context - app = server_context.context.app - if workflow_name not in app.workflows: + if workflow_name not in server_context.workflows: raise ToolError(f"Workflow '{workflow_name}' not found.") # Get the workflow class - workflow_cls = app.workflows[workflow_name] + workflow_cls = server_context.workflows[workflow_name] # Create and initialize the workflow instance using the factory method try: # Create workflow instance - workflow = await workflow_cls.create(name=workflow_name, context=app.context) + workflow = await workflow_cls.create( + name=workflow_name, context=server_context.context + ) run_parameters = run_parameters or {} @@ -902,7 +923,7 @@ def _workflow_status( ctx: MCPContext, workflow_id: str, workflow_name: str | None = None ) -> Dict[str, Any]: server_context: ServerContext = ctx.request_context.lifespan_context - workflow_registry = server_context.context.workflow_registry + workflow_registry = server_context.workflow_registry if not workflow_registry: raise ToolError("Workflow registry not found for MCPApp Server.") diff --git a/src/mcp_agent/executor/workflow.py b/src/mcp_agent/executor/workflow.py index ef75d9fbb..46985af6b 100644 --- a/src/mcp_agent/executor/workflow.py +++ b/src/mcp_agent/executor/workflow.py @@ -6,7 +6,6 @@ Generic, Optional, TypeVar, - Union, List, TYPE_CHECKING, ) @@ -161,7 +160,7 @@ def record_error(self, error: Exception) -> None: class WorkflowResult(BaseModel, Generic[T]): - value: Union[T, None] = None + value: Optional[T] = None metadata: Dict[str, Any] = Field(default_factory=dict) start_time: float | None = None end_time: float | None = None From 78ac47195ea331d9a14d98347a96352c055538a0 Mon Sep 17 00:00:00 2001 From: Sarmad Qadri Date: Thu, 3 Apr 2025 17:26:48 -0400 Subject: [PATCH 7/9] Agent server is operational --- examples/workflow_mcp_server/client.py | 107 +++++++++++++++++++++---- 
src/mcp_agent/executor/workflow.py | 7 +- 2 files changed, 93 insertions(+), 21 deletions(-) diff --git a/examples/workflow_mcp_server/client.py b/examples/workflow_mcp_server/client.py index 381dd4d76..f08f98d70 100644 --- a/examples/workflow_mcp_server/client.py +++ b/examples/workflow_mcp_server/client.py @@ -1,6 +1,6 @@ import asyncio import time - +from mcp.types import CallToolResult from mcp_agent.app import MCPApp from mcp_agent.config import MCPServerSettings from mcp_agent.mcp.gen_client import gen_client @@ -36,23 +36,100 @@ async def main(): # List available workflows logger.info("Fetching available workflows...") workflows_response = await server.call_tool("workflows/list", {}) + logger.info( + "Available workflows:", + data=_tool_result_to_json(workflows_response) or workflows_response, + ) + + # Call the BasicAgentWorkflow + run_result = await server.call_tool( + "workflows/BasicAgentWorkflow/run", + arguments={ + "run_parameters": { + "input": "Find the closest match to this request." + } + }, + ) - workflows = {} - if workflows_response.content and len(workflows_response.content) > 0: - workflows_text = workflows_response.content[0].text - try: - # Try to parse the response as JSON if it's a string - import json + workflow_id: str = run_result.content[0].text + logger.info(f"Started BasicAgentWorkflow/run. workflow ID={workflow_id}") - workflows = json.loads(workflows_text) - except (json.JSONDecodeError, TypeError): - # If it's not valid JSON, just use the text - logger.info("Received workflows text:", data=workflows_text) - workflows = {"workflows_text": workflows_text} + # Wait for the workflow to complete + while True: + get_status_result = await server.call_tool( + "workflows/BasicAgentWorkflow/get_status", + arguments={"workflow_id": workflow_id}, + ) - logger.info( - "Available workflows:", data={"workflows": list(workflows.keys())} - ) + workflow_status = _tool_result_to_json(get_status_result) + if workflow_status is None: + logger.error( + f"Failed to parse workflow status response: {get_status_result}" + ) + break + + logger.info( + f"Workflow {workflow_id} status:", + data=workflow_status, + ) + + if not workflow_status.get("status"): + logger.error( + f"Workflow {workflow_id} status is empty. get_status_result:", + data=get_status_result, + ) + break + + if workflow_status.get("status") == "completed": + logger.info( + f"Workflow {workflow_id} completed successfully! 
Result:", + data=workflow_status.get("result"), + ) + + break + elif workflow_status.get("status") == "error": + logger.error( + f"Workflow {workflow_id} failed with error:", + data=workflow_status, + ) + break + elif workflow_status.get("status") == "running": + logger.info( + f"Workflow {workflow_id} is still running...", + ) + elif workflow_status.get("status") == "cancelled": + logger.error( + f"Workflow {workflow_id} was cancelled.", + data=workflow_status, + ) + break + else: + logger.error( + f"Unknown workflow status: {workflow_status.get('status')}", + data=workflow_status, + ) + break + + await asyncio.sleep(5) + + # TODO: UNCOMMENT ME to try out cancellation: + await server.call_tool( + "workflows/cancel", + arguments={"workflow_id": workflow_id}, + ) + + +def _tool_result_to_json(tool_result: CallToolResult): + if tool_result.content and len(tool_result.content) > 0: + text = tool_result.content[0].text + try: + # Try to parse the response as JSON if it's a string + import json + + return json.loads(text) + except (json.JSONDecodeError, TypeError): + # If it's not valid JSON, just use the text + return None if __name__ == "__main__": diff --git a/src/mcp_agent/executor/workflow.py b/src/mcp_agent/executor/workflow.py index 46985af6b..5a6cd6258 100644 --- a/src/mcp_agent/executor/workflow.py +++ b/src/mcp_agent/executor/workflow.py @@ -357,10 +357,6 @@ async def _execute_workflow(): f"Error cleaning up workflow {self.name} (ID: {self._workflow_id}): {str(cleanup_error)}" ) - # Unregister from the workflow registry (if available) - if self.context and self.context.workflow_registry: - self.context.workflow_registry.unregister(self._workflow_id) - # TODO: saqadri (MAC) - figure out how to do this for different executors. # For Temporal, we would replace this with workflow.start() which also doesn't block self._run_task = asyncio.create_task(_execute_workflow()) @@ -437,6 +433,7 @@ def get_status(self) -> Dict[str, Any]: status = { "id": self._workflow_id, "name": self.name, + "status": self.state.status, "running": self._run_task is not None and not self._run_task.done() if self._run_task else False, @@ -521,8 +518,6 @@ async def cleanup(self): self._logger.debug(f"Cleaning up workflow {self.name}") self._initialized = False - self.state.status = "cleaned_up" - self.state.updated_at = datetime.now(timezone.utc).timestamp() async def __aenter__(self): """Support for async context manager pattern.""" From 8cb4114d7031a09ec35e174c875c456e2b3b0856 Mon Sep 17 00:00:00 2001 From: Sarmad Qadri Date: Thu, 3 Apr 2025 17:41:02 -0400 Subject: [PATCH 8/9] Comment out cancellation --- examples/workflow_mcp_server/client.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/workflow_mcp_server/client.py b/examples/workflow_mcp_server/client.py index f08f98d70..6f505f557 100644 --- a/examples/workflow_mcp_server/client.py +++ b/examples/workflow_mcp_server/client.py @@ -113,10 +113,10 @@ async def main(): await asyncio.sleep(5) # TODO: UNCOMMENT ME to try out cancellation: - await server.call_tool( - "workflows/cancel", - arguments={"workflow_id": workflow_id}, - ) + # await server.call_tool( + # "workflows/cancel", + # arguments={"workflow_id": workflow_id}, + # ) def _tool_result_to_json(tool_result: CallToolResult): From ce64b67cdb76dc7abb1490adb9d663a140b007f8 Mon Sep 17 00:00:00 2001 From: Sarmad Qadri Date: Thu, 3 Apr 2025 17:56:38 -0400 Subject: [PATCH 9/9] Update readme --- examples/workflow_mcp_server/README.md | 42 +-- 
From ce64b67cdb76dc7abb1490adb9d663a140b007f8 Mon Sep 17 00:00:00 2001
From: Sarmad Qadri
Date: Thu, 3 Apr 2025 17:56:38 -0400
Subject: [PATCH 9/9] Update readme

---
 examples/workflow_mcp_server/README.md |  42 +-
 examples/workflow_mcp_server/server.py | 453 -------------------------
 2 files changed, 14 insertions(+), 481 deletions(-)
 delete mode 100644 examples/workflow_mcp_server/server.py

diff --git a/examples/workflow_mcp_server/README.md b/examples/workflow_mcp_server/README.md
index ede9352f4..f948c090f 100644
--- a/examples/workflow_mcp_server/README.md
+++ b/examples/workflow_mcp_server/README.md
@@ -57,9 +57,9 @@ All three approaches can use `app_server.py` to expose the agents and workflows
 
 ## Code Structure
 
-- `server.py`: Defines the workflows and creates the MCP server
-- `client.py`: Connects to the server and runs the workflows
-- `mcp_agent.config.yaml`: Configuration for MCP servers and other settings
+- `basic_agent_server.py`: Defines the BasicAgentWorkflow and creates an MCP server
+- `client.py`: Connects to the server and runs the workflow
+- `mcp_agent.config.yaml`: Configuration for MCP servers
 - `mcp_agent.secrets.yaml`: Secret API keys (not included in repository)
 
 ## Understanding the Code
 
 ### Workflow Definition
 
 Workflows are defined by subclassing the `Workflow` base class and implementing:
 
 - The `run` method containing the main workflow logic
-- `initialize` and `cleanup` methods for setup and teardown
-- Optionally a custom `create` class method for specialized instantiation
+- Optional: `initialize` and `cleanup` methods for setup and teardown
+- Optional: a custom `create` class method for specialized instantiation
+
+Workflows are registered with the MCPApp using the `@app.workflow` decorator.
+
+Example:
+
+```python
+app = MCPApp(name="workflow_mcp_server")
+
+@app.workflow
 class DataProcessorWorkflow(Workflow[str]):
     @classmethod
     async def create(cls, executor: Executor, name: str | None = None, **kwargs: Any) -> "DataProcessorWorkflow":
         ...
 
     async def run(self, source: str, analysis_prompt: Optional[str] = None, output_format: Optional[str] = None) -> WorkflowResult[str]:
         # Workflow implementation...
 
     async def initialize(self):
         # Setup code...
 
     async def cleanup(self):
         # Clean up resources
 ```
 
-The base `Workflow` class provides a default implementation of `create()` that handles basic initialization, but workflows can override this for specialized setup. Our example shows both approaches:
-
-1. `DataProcessorWorkflow` overrides the `create()` method to implement custom initialization
-2. 
`SummarizationWorkflow` uses the default implementation from the base class - -Workflows are registered with the MCPApp using the `@app.workflow` decorator: - -```python -app = MCPApp(name="workflow_mcp_server") - -@app.workflow -class DataProcessorWorkflowRegistered(DataProcessorWorkflow): - pass -``` - ### Approach 2: Programmatic Agent Configuration Agent configurations can be created programmatically using Pydantic models: @@ -118,8 +110,6 @@ research_agent_config = AgentConfig( server_names=["fetch", "filesystem"], llm_config=AugmentedLLMConfig( factory=OpenAIAugmentedLLM, - model="gpt-4o", - temperature=0.7 ) ) @@ -130,7 +120,6 @@ research_team_config = AgentConfig( parallel_config=ParallelWorkflowConfig( fan_in_agent="editor", fan_out_agents=["summarizer", "fact_checker"], - concurrent=True ) ) @@ -152,8 +141,6 @@ fast_app = FastMCPApp(name="fast_workflow_mcp_server") def assistant_config(config): config.llm_config = AugmentedLLMConfig( factory=OpenAIAugmentedLLM, - model="gpt-4o", - temperature=0.7 ) return config @@ -162,8 +149,7 @@ def assistant_config(config): agent_names=["mathematician", "programmer", "writer"]) def router_config(config): config.llm_config = AugmentedLLMConfig( - factory=OpenAIAugmentedLLM, - model="gpt-4o" + factory=OpenAIAugmentedLLM ) config.router_config.top_k = 1 return config @@ -177,7 +163,7 @@ The MCP server automatically exposes both workflows and agent configurations as - Running a workflow: `workflows/{workflow_id}/run` - Checking status: `workflows/{workflow_id}/get_status` -- Controlling workflow execution: `workflows/{workflow_id}/pause`, `workflows/{workflow_id}/resume`, `workflows/{workflow_id}/cancel` +- Controlling workflow execution: `workflows/resume`, `workflows/cancel` **Agent tools**: diff --git a/examples/workflow_mcp_server/server.py b/examples/workflow_mcp_server/server.py deleted file mode 100644 index d40e6bcbc..000000000 --- a/examples/workflow_mcp_server/server.py +++ /dev/null @@ -1,453 +0,0 @@ -""" -Workflow MCP Server Example - -This example demonstrates three approaches to creating agents and workflows: -1. Traditional workflow-based approach with manual agent creation -2. Programmatic agent configuration using AgentConfig -3. Declarative agent configuration using FastMCPApp decorators -""" - -import asyncio -import os -import logging -from typing import Dict, Any, Optional, TYPE_CHECKING - -if TYPE_CHECKING: - from mcp_agent.context import Context - -from mcp_agent.fast_app import FastMCPApp -from mcp_agent.app_server import create_mcp_server_for_app -from mcp_agent.agents.agent import Agent -from mcp_agent.agents.agent_config import ( - AgentConfig, - AugmentedLLMConfig, - ParallelLLMConfig, -) -from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM -from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM -from mcp_agent.executor.workflow import Workflow, WorkflowResult - -# Initialize logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -class DataProcessorWorkflow(Workflow[str]): - """ - A workflow that processes data using multiple agents, each specialized for a different task. - This workflow demonstrates how to use multiple agents to process data in a sequence. - """ - - @classmethod - async def create( - cls, name: str | None = None, context: Optional["Context"] = None, **kwargs: Any - ) -> "DataProcessorWorkflow": - """ - Factory method to create and initialize the DataProcessorWorkflow. 
-        Demonstrates how to override the default create method for specialized initialization.
-
-        Args:
-            name: Optional workflow name
-            context: Optional context to use (will use global context if not provided)
-            **kwargs: Additional parameters for customization
-
-        Returns:
-            An initialized DataProcessorWorkflow instance
-        """
-        # Create the workflow instance
-        workflow = cls(name=name or "data_processor", context=context, **kwargs)
-
-        # Initialize it (which will set up agents, etc.)
-        await workflow.initialize()
-
-        return workflow
-
-    async def initialize(self):
-        await super().initialize()
-        self.state.status = "ready"
-
-        # Create agents for different steps of the workflow
-        self.finder_agent = Agent(
-            name="finder",
-            instruction="You are specialized in finding and retrieving information from files or URLs.",
-            server_names=["fetch", "filesystem"],
-        )
-
-        self.analyzer_agent = Agent(
-            name="analyzer",
-            instruction="You are specialized in analyzing text data and extracting key insights.",
-            server_names=["fetch"],
-        )
-
-        self.formatter_agent = Agent(
-            name="formatter",
-            instruction="You are specialized in formatting data into structured outputs.",
-            server_names=[],
-        )
-
-        # Initialize the agents
-        await self.finder_agent.initialize()
-        await self.analyzer_agent.initialize()
-        await self.formatter_agent.initialize()
-
-        # Attach LLMs to the agents
-        self.finder_llm = await self.finder_agent.attach_llm(OpenAIAugmentedLLM)
-        self.analyzer_llm = await self.analyzer_agent.attach_llm(OpenAIAugmentedLLM)
-        self.formatter_llm = await self.formatter_agent.attach_llm(OpenAIAugmentedLLM)
-
-    async def cleanup(self):
-        # Clean up resources
-        await self.finder_agent.shutdown()
-        await self.analyzer_agent.shutdown()
-        await self.formatter_agent.shutdown()
-        await super().cleanup()
-
-    async def run(
-        self,
-        source: str,
-        analysis_prompt: Optional[str] = None,
-        output_format: Optional[str] = None,
-    ) -> WorkflowResult[str]:
-        """
-        Run the data processing workflow.
-
-        Args:
-            source: The source to process. Can be a file path or URL.
-            analysis_prompt: Optional specific instructions for the analysis step.
-            output_format: Optional format for the output (e.g., "json", "markdown", "summary").
-
-        Returns:
-            WorkflowResult containing the processed data.
-        """
-        self.state.status = "running"
-        self._logger.info(f"Starting data processing workflow for source: {source}")
-
-        # Step 1: Find and retrieve the data
-        self._logger.info("Step 1: Finding and retrieving data")
-        self.state.metadata["current_step"] = "retrieval"
-
-        retrieval_prompt = f"Retrieve the content from {source} and return it verbatim."
-        raw_data = await self.finder_llm.generate_str(retrieval_prompt)
-
-        self.state.metadata["retrieval_completed"] = True
-        self.state.metadata["content_length"] = len(raw_data)
-
-        # Step 2: Analyze the data
-        self._logger.info("Step 2: Analyzing data")
-        self.state.metadata["current_step"] = "analysis"
-
-        analysis_instruction = (
-            analysis_prompt
-            or "Analyze this content and extract the key points, main themes, and most important information."
-        )
-        analysis = await self.analyzer_llm.generate_str(
-            f"{analysis_instruction}\n\nHere is the content to analyze:\n\n{raw_data[:5000]}"  # Limit to 5000 chars for safety
-        )
-
-        self.state.metadata["analysis_completed"] = True
-
-        # Step 3: Format the result
-        self._logger.info("Step 3: Formatting output")
-        self.state.metadata["current_step"] = "formatting"
-
-        format_instruction = output_format or "markdown"
-        format_prompt = f"Format the following analysis into {format_instruction} format, highlighting the most important points:\n\n{analysis}"
-
-        formatted_result = await self.formatter_llm.generate_str(format_prompt)
-
-        self.state.metadata["formatting_completed"] = True
-        self.state.status = "completed"
-
-        # Create and return the final result
-        result = WorkflowResult[str](
-            value=formatted_result,
-            metadata={
-                "source": source,
-                "content_length": len(raw_data),
-                "analysis_prompt": analysis_prompt,
-                "output_format": format_instruction,
-                "workflow_completed": True,
-            },
-            start_time=self.state.metadata["start_time"]
-            if "start_time" in self.state.metadata
-            else None,  # Handle missing "start_time" gracefully
-            end_time=self.state.updated_at,
-        )
-
-        return result
-
-
-class SummarizationWorkflow(Workflow[Dict[str, Any]]):
-    """
-    A workflow that summarizes text content with customizable parameters.
-    This workflow demonstrates how to create a simple summarization pipeline.
-
-    This workflow uses the default create() implementation from the base Workflow class,
-    showing that it's not necessary to override create() in every workflow.
-    """
-
-    async def initialize(self):
-        await super().initialize()
-
-        # Create an agent for summarization
-        self.summarizer_agent = Agent(
-            name="summarizer",
-            instruction="You are specialized in summarizing content clearly and concisely.",
-            server_names=["fetch", "filesystem"],
-        )
-
-        # Initialize the agent
-        await self.summarizer_agent.initialize()
-
-        # Attach LLM to the agent
-        self.summarizer_llm = await self.summarizer_agent.attach_llm(OpenAIAugmentedLLM)
-
-    async def cleanup(self):
-        await self.summarizer_agent.shutdown()
-        await super().cleanup()
-
-    async def run(
-        self,
-        content: str,
-        max_length: int = 500,
-        style: str = "concise",
-        key_points: int = 3,
-    ) -> WorkflowResult[Dict[str, Any]]:
-        """
-        Summarize the provided content.
-
-        Args:
-            content: The text content to summarize.
-            max_length: Maximum length of the summary in characters.
-            style: Style of summarization (concise, detailed, technical, simple).
-            key_points: Number of key points to include.
-
-        Returns:
-            WorkflowResult containing the summary and metadata.
-        """
-        self.state.status = "running"
-        self._logger.info(
-            f"Starting summarization workflow (style: {style}, key_points: {key_points})"
-        )
-
-        # Record the start time
-        start_time = self.state.updated_at
-
-        # Build the summarization prompt
-        prompt = f"""
-        Summarize the following content in a {style} style.
-        Include {key_points} key points.
-        Keep the summary under {max_length} characters.
-
-        Content to summarize:
-        ---
-        {content[:10000]}  # Limit content to 10,000 chars for safety
-        ---
-        """
-
-        summary = await self.summarizer_llm.generate_str(prompt)
-
-        # Extract key points using a follow-up prompt
-        key_points_prompt = f"Based on the content I just summarized, list exactly {key_points} key points in bullet point format."
-        key_points_list = await self.summarizer_llm.generate_str(key_points_prompt)
-
-        self.state.status = "completed"
-
-        # Create the structured result
-        result = WorkflowResult[Dict[str, Any]](
-            value={
-                "summary": summary,
-                "key_points": key_points_list,
-                "style": style,
-                "length": len(summary),
-                "requested_max_length": max_length,
-            },
-            metadata={
-                "workflow_name": self.name,
-                "content_length": len(content),
-                "completion_status": "success",
-            },
-            start_time=start_time,
-            end_time=self.state.updated_at,
-        )
-
-        return result
-
-
-# Create a single FastMCPApp instance (which extends MCPApp)
-app = FastMCPApp(name="workflow_mcp_server")
-
-# -------------------------------------------------------------------------
-# Approach 1: Traditional workflow registration with @app.workflow decorator
-# -------------------------------------------------------------------------
-
-
-# Register workflows with the app
-@app.workflow
-class DataProcessorWorkflowRegistered(DataProcessorWorkflow):
-    """Data processing workflow registered with the app."""
-
-    pass
-
-
-@app.workflow
-class SummarizationWorkflowRegistered(SummarizationWorkflow):
-    """Summarization workflow registered with the app."""
-
-    pass
-
-
-# -------------------------------------------------------------------------
-# Approach 2: Programmatic agent configuration with AgentConfig
-# -------------------------------------------------------------------------
-
-# Create a basic agent configuration
-research_agent_config = AgentConfig(
-    name="researcher",
-    instruction="You are a helpful research assistant that finds information and presents it clearly.",
-    server_names=["fetch", "filesystem"],
-    llm_config=AugmentedLLMConfig(
-        factory=OpenAIAugmentedLLM,
-        model="gpt-4o",
-        temperature=0.7,
-        provider_params={"max_tokens": 2000},
-    ),
-)
-
-# Create component agents for a parallel workflow
-programmatic_summarizer_config = AgentConfig(
-    name="programmatic_summarizer",
-    instruction="You are specialized in summarizing information clearly and concisely.",
-    server_names=["fetch"],
-    llm_config=AugmentedLLMConfig(
-        factory=AnthropicAugmentedLLM, model="claude-3-sonnet-20240229"
-    ),
-)
-
-programmatic_fact_checker_config = AgentConfig(
-    name="programmatic_fact_checker",
-    instruction="You verify facts and identify potential inaccuracies in information.",
-    server_names=["fetch", "filesystem"],
-    llm_config=AugmentedLLMConfig(factory=OpenAIAugmentedLLM, model="gpt-4o"),
-)
-
-programmatic_editor_config = AgentConfig(
-    name="programmatic_editor",
-    instruction="You refine and improve text, focusing on clarity and readability.",
-    server_names=[],
-    llm_config=AugmentedLLMConfig(factory=OpenAIAugmentedLLM, model="gpt-4o"),
-)
-
-# Create a parallel workflow configuration
-programmatic_research_team_config = AgentConfig(
-    name="programmatic_research_team",
-    instruction="You are a research team that produces high-quality, accurate content.",
-    server_names=["fetch", "filesystem"],
-    llm_config=AugmentedLLMConfig(
-        factory=AnthropicAugmentedLLM, model="claude-3-opus-20240229"
-    ),
-    parallel_config=ParallelLLMConfig(
-        fan_in_agent="programmatic_editor",
-        fan_out_agents=["programmatic_summarizer", "programmatic_fact_checker"],
-        concurrent=True,
-    ),
-)
-
-# Register the configurations with the app using programmatic method
-app.register_agent_config(research_agent_config)
-app.register_agent_config(programmatic_summarizer_config)
-app.register_agent_config(programmatic_fact_checker_config)
-app.register_agent_config(programmatic_editor_config)
-app.register_agent_config(programmatic_research_team_config)
-
-# -------------------------------------------------------------------------
-# Approach 3: Declarative agent configuration with FastMCPApp decorators
-# -------------------------------------------------------------------------
-
-
-# Basic agent with OpenAI LLM
-@app.agent(
-    "assistant",
-    "You are a helpful assistant that answers questions concisely.",
-    server_names=["calculator"],
-)
-def assistant_config(config):
-    # Configure the LLM to use
-    config.llm_config = AugmentedLLMConfig(
-        factory=OpenAIAugmentedLLM, model="gpt-4o", temperature=0.7
-    )
-    return config
-
-
-# Component agents for router workflow
-@app.agent(
-    "mathematician",
-    "You solve mathematical problems with precision.",
-    server_names=["calculator"],
-)
-def mathematician_config(config):
-    config.llm_config = AugmentedLLMConfig(factory=OpenAIAugmentedLLM, model="gpt-4o")
-    return config
-
-
-@app.agent(
-    "programmer",
-    "You write and debug code in various programming languages.",
-    server_names=["filesystem"],
-)
-def programmer_config(config):
-    config.llm_config = AugmentedLLMConfig(factory=OpenAIAugmentedLLM, model="gpt-4o")
-    return config
-
-
-@app.agent("writer", "You write creative and engaging content.", server_names=[])
-def writer_config(config):
-    config.llm_config = AugmentedLLMConfig(
-        factory=AnthropicAugmentedLLM, model="claude-3-sonnet-20240229"
-    )
-    return config
-
-
-# Router workflow using the decorator syntax
-@app.router(
-    "specialist_router",
-    "You route requests to the appropriate specialist.",
-    agent_names=["mathematician", "programmer", "writer"],
-)
-def router_config(config):
-    config.llm_config = AugmentedLLMConfig(factory=OpenAIAugmentedLLM, model="gpt-4o")
-    # Configure top_k for the router
-    config.router_config.top_k = 1
-    return config
-
-
-async def main():
-    # Initialize the app
-    await app.initialize()
-
-    # Add the current directory to the filesystem server's args if needed
-    context = app.context
-    if "filesystem" in context.config.mcp.servers:
-        context.config.mcp.servers["filesystem"].args.extend([os.getcwd()])
-
-    # Log registered workflows and agent configurations
-    logger.info(f"Creating MCP server for {app.name}")
-
-    logger.info("Registered workflows:")
-    for workflow_id in app.workflows:
-        logger.info(f"  - {workflow_id}")
-
-    logger.info("Registered agent configurations:")
-    for name, config in app.agent_configs.items():
-        workflow_type = config.get_agent_type() or "basic"
-        logger.info(f"  - {name} ({workflow_type})")
-
-    # Create the MCP server that exposes both workflows and agent configurations
-    mcp_server = create_mcp_server_for_app(app)
-
-    # Run the server
-    await mcp_server.run_stdio_async()
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
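
After this series, the example consists of `basic_agent_server.py` plus `client.py`. For readers wiring this up from scratch, a condensed sketch of the client side follows. Assumptions: `gen_client` is used as in `client.py`, taking a server name plus the `server_registry` from an initialized `MCPApp` context; the server name "basic_agent_server" is a hypothetical registration in `mcp_agent.config.yaml`. The tool names are the ones these patches expose via `app_server.py`:

```python
import asyncio

from mcp_agent.app import MCPApp
from mcp_agent.mcp.gen_client import gen_client

app = MCPApp(name="workflow_mcp_server_client")


async def quickstart():
    async with app.run() as agent_app:
        # Assumption: "basic_agent_server" is registered in mcp_agent.config.yaml
        # (or programmatically via MCPServerSettings) to launch basic_agent_server.py.
        async with gen_client(
            "basic_agent_server", agent_app.context.server_registry
        ) as server:
            # Discover workflows exposed by the server.
            workflows = await server.call_tool("workflows/list", {})

            # Start a run; the tool returns the workflow ID as text content.
            run_result = await server.call_tool(
                "workflows/BasicAgentWorkflow/run",
                arguments={"run_parameters": {"input": "Hello"}},
            )
            workflow_id = run_result.content[0].text

            # Check status once; client.py shows the full polling loop with
            # terminal statuses "completed", "error", and "cancelled".
            status = await server.call_tool(
                "workflows/BasicAgentWorkflow/get_status",
                arguments={"workflow_id": workflow_id},
            )
            print(workflows, status)


if __name__ == "__main__":
    asyncio.run(quickstart())
```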