- LangGraph - build complex, multi-step agent workflows
- OpenAI Agents SDK - OpenAI's official agent framework
- Microsoft AutoGen - Microsoft's multi-agent conversation framework
- Google ADK - Google Agent Development Kit
## Core Integration Pattern

Integration with every framework follows the same core pattern:
```python
from ppio_sandbox.agent_runtime import AgentRuntimeApp

# 1. Create the Agent Runtime application instance
app = AgentRuntimeApp()

# 2. Initialize your agent framework

# 3. Define the entrypoint with the decorator
@app.entrypoint
def agent_invocation(request: dict) -> dict:
    """
    Args:
        request: Request payload, usually containing a "prompt" field

    Returns:
        Response payload as a dict
    """
    prompt = request.get("prompt", "")

    # Call your agent framework
    result = your_agent.run(prompt)

    return {"result": result}

# 4. Run the application
if __name__ == "__main__":
    app.run()
```
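Before deploying, it can help to sanity-check the request/response contract locally. The sketch below is a hypothetical test file, not part of the SDK: it assumes the pattern above lives in a `main.py` with `your_agent` wired up, and that `@app.entrypoint` returns the original callable unchanged (typical for registration-style decorators, but not guaranteed here).

```python
# test_entrypoint.py - hypothetical local smoke test (not part of the SDK).
# Assumes the pattern above lives in main.py and that @app.entrypoint
# returns the original function unchanged.
from main import agent_invocation


def test_contract():
    response = agent_invocation({"prompt": "Hello"})
    # The entrypoint should always return a dict with a "result" key.
    assert isinstance(response, dict) and "result" in response


if __name__ == "__main__":
    test_contract()
    print("Entrypoint contract OK")
```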
## LangGraph

LangGraph is LangChain's official framework for building stateful, multi-step agent applications.

### Example Code

For the complete example project, see here.
```python
from langchain_community.chat_models import ChatOpenAI
from typing_extensions import TypedDict
from typing import Annotated
from langgraph.graph import StateGraph, START
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition

# Import PPIO Agent Runtime
from ppio_sandbox.agent_runtime import AgentRuntimeApp

app = AgentRuntimeApp()

# Define the graph state
class State(TypedDict):
    messages: Annotated[list, add_messages]

# Initialize the LLM
llm = ChatOpenAI(model="gpt-4")

# Define tools
def get_weather(location: str) -> str:
    """Get the weather for a given location."""
    return f"The weather in {location} is sunny, 23°C"

tools = [get_weather]
llm_with_tools = llm.bind_tools(tools)

# Define node functions
def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}

# Build the graph
graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("tools", ToolNode(tools=tools))
graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
graph = graph_builder.compile()

# Define the entrypoint
@app.entrypoint
def agent_invocation(request: dict) -> dict:
    """LangGraph agent entrypoint."""
    prompt = request.get("prompt", "Hello!")

    # Invoke the LangGraph graph
    result = graph.invoke({
        "messages": [{"role": "user", "content": prompt}]
    })

    # Extract the last message
    final_message = result["messages"][-1].content

    return {"result": final_message}

if __name__ == "__main__":
    app.run()
```
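If you want to surface intermediate steps rather than only the final message, compiled LangGraph graphs also expose a `stream` method. A minimal sketch using the same `graph` object as above; with `stream_mode="values"`, each yielded item is the full graph state after a step.

```python
# Optional: stream the graph state step by step instead of graph.invoke().
def stream_agent(prompt: str) -> str:
    final_message = ""
    for state in graph.stream(
        {"messages": [{"role": "user", "content": prompt}]},
        stream_mode="values",  # yield the full state after every step
    ):
        last = state["messages"][-1]
        # Messages may be LangChain message objects; fall back to str() otherwise.
        final_message = getattr(last, "content", str(last))
    return final_message
```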
## OpenAI Agents SDK

The OpenAI Agents SDK is OpenAI's official toolkit for building AI agents.

### Example Code

For the complete example project, see here.
```python
import os
import json

from ppio_sandbox.agent_runtime import AgentRuntimeApp
from openai import AsyncOpenAI

# Create the application
app = AgentRuntimeApp()

# Tool function
def get_weather(city: str) -> str:
    """Get the weather for a city (mocked)."""
    weather_data = {
        "Beijing": "Sunny, 15°C",
        "Shanghai": "Cloudy, 20°C",
        "Shenzhen": "Light rain, 25°C",
    }
    return weather_data.get(city, f"{city}: sunny, 23°C")

# Tool schema
TOOLS = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the weather for a given city",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {
                        "type": "string",
                        "description": "City name, e.g. 'Beijing', 'Shanghai'"
                    }
                },
                "required": ["city"]
            }
        }
    }
]

# Core agent logic
async def run_agent(query: str) -> str:
    """Run the OpenAI agent (with function calling)."""
    client = AsyncOpenAI(
        base_url=os.getenv("OPENAI_API_BASE"),
        api_key=os.getenv("PPIO_API_KEY"),
    )

    messages = [
        {"role": "system", "content": "You are a helpful AI assistant that can look up the weather."},
        {"role": "user", "content": query}
    ]

    # First call
    response = await client.chat.completions.create(
        model=os.getenv("MODEL_NAME", "deepseek/deepseek-v3.1-terminus"),
        messages=messages,
        tools=TOOLS,
        tool_choice="auto"
    )
    response_message = response.choices[0].message

    # If the model requested tool calls
    if response_message.tool_calls:
        messages.append(response_message)

        for tool_call in response_message.tool_calls:
            # Tool arguments arrive as a JSON string; parse them safely
            function_args = json.loads(tool_call.function.arguments)
            function_response = get_weather(**function_args)

            messages.append({
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": "get_weather",
                "content": function_response
            })

        # Second call to get the final response
        final_response = await client.chat.completions.create(
            model=os.getenv("MODEL_NAME", "deepseek/deepseek-v3.1-terminus"),
            messages=messages
        )
        return final_response.choices[0].message.content

    return response_message.content

# PPIO Agent Runtime entrypoint
@app.entrypoint
async def agent_invocation(request: dict) -> dict:
    """Entrypoint function."""
    prompt = request.get("prompt", "Hello!")
    result = await run_agent(prompt)
    return {"result": result}

# Start the application
if __name__ == "__main__":
    app.run()
```
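Note that `run_agent` handles a single round of tool calls. If the model may chain several tool calls before answering, the same two API calls can be folded into a loop. A sketch reusing `get_weather` and `TOOLS` from above; the `max_rounds` safety cap is an addition for illustration, not part of the original example.

```python
async def run_agent_loop(query: str, max_rounds: int = 5) -> str:
    """Variant of run_agent that keeps executing tools until the model stops requesting them."""
    client = AsyncOpenAI(
        base_url=os.getenv("OPENAI_API_BASE"),
        api_key=os.getenv("PPIO_API_KEY"),
    )
    messages = [
        {"role": "system", "content": "You are a helpful AI assistant that can look up the weather."},
        {"role": "user", "content": query},
    ]
    for _ in range(max_rounds):
        response = await client.chat.completions.create(
            model=os.getenv("MODEL_NAME", "deepseek/deepseek-v3.1-terminus"),
            messages=messages,
            tools=TOOLS,
            tool_choice="auto",
        )
        message = response.choices[0].message
        if not message.tool_calls:
            # No more tool calls: this is the final answer.
            return message.content
        messages.append(message)
        for tool_call in message.tool_calls:
            args = json.loads(tool_call.function.arguments)
            messages.append({
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": tool_call.function.name,
                "content": get_weather(**args),
            })
    return "Reached the tool-call round limit without a final answer."
```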
## Microsoft AutoGen

AutoGen is a multi-agent conversation framework from Microsoft. It supports multi-agent collaboration and conversation, and agents can run autonomously or work alongside humans.

### Example Code

For the complete example project, see here.
```python
import os

from ppio_sandbox.agent_runtime import AgentRuntimeApp
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_core.models import ModelFamily, ModelInfo

# Create the application
app = AgentRuntimeApp()

# Tool function
async def get_weather(city: str) -> str:
    """Get the weather for a given city."""
    weather_data = {
        "Beijing": "Sunny, 15°C",
        "Shanghai": "Cloudy, 20°C",
        "Shenzhen": "Light rain, 25°C",
    }
    return weather_data.get(city, f"{city}: sunny, 23°C")

# Create the AutoGen agent
def create_agent():
    """Create an AutoGen agent."""
    model_client = OpenAIChatCompletionClient(
        base_url=os.getenv("OPENAI_BASE_URL", "https://api.ppinfra.com/v3/openai"),
        model=os.getenv("MODEL_NAME", "deepseek/deepseek-v3.1-terminus"),
        api_key=os.getenv("OPENAI_API_KEY"),
        model_info=ModelInfo(
            vision=False,
            function_calling=True,
            json_output=True,
            family=ModelFamily.UNKNOWN,
        ),
    )

    agent = AssistantAgent(
        name="assistant",
        model_client=model_client,
        tools=[get_weather],
        system_message="You are a helpful AI assistant that can look up weather information.",
        reflect_on_tool_use=True,
    )
    return agent

# Run the agent
async def run_agent(prompt: str) -> str:
    """Run the AutoGen agent."""
    agent = create_agent()

    # Create the user message
    message = TextMessage(content=prompt, source="user")

    # Run the agent (on_messages expects a cancellation token alongside the messages)
    response_message = await agent.on_messages([message], cancellation_token=CancellationToken())

    # Extract the response content
    if response_message and hasattr(response_message, "chat_message"):
        return response_message.chat_message.content
    return str(response_message)

# PPIO Agent Runtime entrypoint
@app.entrypoint
async def agent_invocation(request: dict):
    """Entrypoint function."""
    prompt = request.get("prompt", "Hello!")
    result = await run_agent(prompt)
    return {"result": result}

# Start the application
if __name__ == "__main__":
    app.run()
```
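AutoGen's main draw is multi-agent collaboration. Below is a hedged sketch of a two-agent round-robin team built on the same model client setup; it assumes autogen-agentchat's `RoundRobinGroupChat` and `TextMentionTermination` (0.4-style APIs), so verify the names against your installed version.

```python
# Hedged sketch, not from the original example: a two-agent round-robin team.
# Assumes autogen-agentchat's RoundRobinGroupChat / TextMentionTermination APIs.
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination

async def run_team(prompt: str) -> str:
    """Writer drafts an answer; reviewer replies APPROVE when satisfied."""
    model_client = OpenAIChatCompletionClient(
        base_url=os.getenv("OPENAI_BASE_URL", "https://api.ppinfra.com/v3/openai"),
        model=os.getenv("MODEL_NAME", "deepseek/deepseek-v3.1-terminus"),
        api_key=os.getenv("OPENAI_API_KEY"),
        model_info=ModelInfo(vision=False, function_calling=True,
                             json_output=True, family=ModelFamily.UNKNOWN),
    )
    writer = AssistantAgent(
        name="writer", model_client=model_client,
        system_message="Draft an answer to the user's question.",
    )
    reviewer = AssistantAgent(
        name="reviewer", model_client=model_client,
        system_message="Review the draft and reply APPROVE when it is good enough.",
    )
    team = RoundRobinGroupChat(
        [writer, reviewer],
        termination_condition=TextMentionTermination("APPROVE"),
    )
    result = await team.run(task=prompt)
    # Return the last message of the conversation (a rough extraction for the sketch).
    return str(result.messages[-1].content) if result.messages else ""
```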
## Google ADK

The Google Agent Development Kit (ADK) is Google's toolkit for building agents.

### Complete Example

For the complete example project, see here.
```python
import os
import uuid
import asyncio

from google.adk.agents import LlmAgent
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.adk.tools import google_search
from google.genai import types

from ppio_sandbox.agent_runtime import AgentRuntimeApp

# Create the application
app = AgentRuntimeApp()

APP_NAME = "google_search_agent"

# Create the Google ADK agent
root_agent = LlmAgent(
    model=os.getenv("GEMINI_MODEL", "gemini-2.0-flash"),
    name=APP_NAME,
    instruction="I can answer your questions by searching the internet. Just ask!",
    tools=[google_search]
)

# Create the session service and runner
session_service = InMemorySessionService()
runner = Runner(
    agent=root_agent,
    app_name=APP_NAME,
    session_service=session_service
)

# Run the agent
async def run_agent(query: str) -> str:
    """Run the Google ADK agent."""
    user_id = "user_default"
    session_id = str(uuid.uuid4())

    # Create a session
    await session_service.create_session(
        app_name=APP_NAME,
        user_id=user_id,
        session_id=session_id
    )

    # Create the user message
    user_content = types.Content(
        role='user',
        parts=[types.Part(text=query)]
    )

    # Run the agent
    result = ""
    async for event in runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=user_content
    ):
        if event.is_final_response() and event.content and event.content.parts:
            result = event.content.parts[0].text

    return result

# PPIO Agent Runtime entrypoint
@app.entrypoint
def agent_invocation(request: dict):
    """Entrypoint function."""
    prompt = request.get("prompt", "Hello!")
    result = asyncio.run(run_agent(prompt))
    return {"result": result}

# Start the application
if __name__ == "__main__":
    app.run()
```
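Because `run_agent` creates a fresh session per call, every request is stateless. For multi-turn conversations, one option is to let the caller supply a session identifier and reuse the session across requests. A sketch follows; the caller-supplied `session_id` is an assumption for illustration, not part of the Agent Runtime request contract.

```python
# Hedged sketch: reuse a caller-supplied session for multi-turn conversations.
# The session_id parameter is an assumption for illustration, not part of the
# Agent Runtime request contract. Reuses session_service, runner, APP_NAME
# and types from the example above.
_seen_sessions: set[str] = set()

async def run_agent_in_session(query: str, session_id: str) -> str:
    user_id = "user_default"
    if session_id not in _seen_sessions:
        # Only create the session the first time this id is seen.
        await session_service.create_session(
            app_name=APP_NAME, user_id=user_id, session_id=session_id
        )
        _seen_sessions.add(session_id)

    user_content = types.Content(role="user", parts=[types.Part(text=query)])

    result = ""
    async for event in runner.run_async(
        user_id=user_id, session_id=session_id, new_message=user_content
    ):
        if event.is_final_response() and event.content and event.content.parts:
            result = event.content.parts[0].text
    return result
```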