官网示例代码

from typing_extensions import Literal
from langchain_core.messages import ToolMessage
from langchain_core.tools import tool
from langgraph.graph import MessagesState, StateGraph, START
from langgraph.types import Command
from langchain_openai import ChatOpenAI

# Chat model backed by Zhipu's GLM-4-PLUS through its OpenAI-compatible API.
# temperature=0 keeps arithmetic answers deterministic.
model = ChatOpenAI(
    temperature=0,
    model="GLM-4-PLUS",
    openai_api_key="your api key",  # NOTE(review): placeholder — load from an env var, never hardcode real keys
    openai_api_base="https://open.bigmodel.cn/api/paas/v4/"
)



@tool
def transfer_to_multiplication_expert():
    """Ask multiplication agent for help."""
    # Deliberately a no-op: the tool's only purpose is to let the LLM
    # signal that control should be handed to the multiplication agent.
    # (The docstring above doubles as the tool description the LLM sees.)
    return None


@tool
def transfer_to_addition_expert():
    """Ask addition agent for help."""
    # Intentional no-op — invoking this tool is purely a handoff signal.
    return None


def addition_expert(
    state: MessagesState,
) -> Command[Literal["multiplication_expert", "__end__"]]:
    """Addition specialist node.

    Answers addition questions itself; when the model emits a tool call it
    hands the conversation off to the multiplication expert instead.
    """
    system_prompt = (
        "You are an addition expert, you can ask the multiplication expert for help with multiplication. "
        "Always do your portion of calculation before the handoff."
    )
    prompt_messages = [{"role": "system", "content": system_prompt}, *state["messages"]]
    response = model.bind_tools([transfer_to_multiplication_expert]).invoke(prompt_messages)

    # No tool call: the model produced a final answer — return it to the user.
    if not response.tool_calls:
        return {"messages": [response]}

    # A tool call signals a handoff. Providers expect every AI message that
    # carries tool calls to be followed by a matching tool result message,
    # so append a synthetic acknowledgement before routing.
    handoff_ack = {
        "role": "tool",
        "content": "Successfully transferred",
        "tool_call_id": response.tool_calls[-1]["id"],
    }
    return Command(
        goto="multiplication_expert", update={"messages": [response, handoff_ack]}
    )


def multiplication_expert(
    state: MessagesState,
) -> Command[Literal["addition_expert", "__end__"]]:
    """Multiplication specialist node.

    Mirrors `addition_expert`: answers multiplication questions itself and
    hands addition work back via a tool-call-triggered routing Command.
    """
    system_prompt = (
        "You are a multiplication expert, you can ask an addition expert for help with addition. "
        "Always do your portion of calculation before the handoff."
    )
    prompt_messages = [{"role": "system", "content": system_prompt}, *state["messages"]]
    response = model.bind_tools([transfer_to_addition_expert]).invoke(prompt_messages)

    # Direct answer — no handoff requested.
    if not response.tool_calls:
        return {"messages": [response]}

    # Handoff requested: pair the tool call with the tool result message
    # that providers require, then route to the addition expert.
    handoff_ack = {
        "role": "tool",
        "content": "Successfully transferred",
        "tool_call_id": response.tool_calls[-1]["id"],
    }
    return Command(goto="addition_expert", update={"messages": [response, handoff_ack]})
# Build the two-expert handoff graph.
# (The original snippet constructed the identical graph twice back-to-back;
# the redundant duplicate construction has been removed.)
builder = StateGraph(MessagesState)
builder.add_node("addition_expert", addition_expert)
builder.add_node("multiplication_expert", multiplication_expert)
# we'll always start with the addition expert
builder.add_edge(START, "addition_expert")
graph = builder.compile()
from IPython.display import Image, display

# Render the compiled graph as an inline Mermaid PNG (notebook-only side effect).
display(Image(graph.get_graph().draw_mermaid_png()))

在这里插入图片描述

from langchain_core.messages import convert_to_messages


def pretty_print_messages(update):
    """Pretty-print one streamed graph update, labelling its subgraph/node origin."""
    if isinstance(update, tuple):
        namespace, update = update
        # Parent-graph updates arrive with an empty namespace; skip those.
        if not namespace:
            return

        graph_id = namespace[-1].split(":")[0]
        print(f"Update from subgraph {graph_id}:")
        print("\n")

    for node_name, node_update in update.items():
        print(f"Update from node {node_name}:")
        print("\n")

        for message in convert_to_messages(node_update["messages"]):
            message.pretty_print()
        print("\n")
# Stream graph execution for a mixed addition/multiplication question and
# pretty-print each node's update as it arrives.
for chunk in graph.stream(
    {"messages": [("user", "what's (3 + 5) * 12")]},
):
    pretty_print_messages(chunk)
Update from node addition_expert:


================================== Ai Message ==================================
Tool Calls:
  transfer_to_multiplication_expert (call_-9024512232541698720)
 Call ID: call_-9024512232541698720
  Args:
================================= Tool Message =================================

Successfully transferred


Update from node multiplication_expert:


================================== Ai Message ==================================

The result of \((3 + 5) \times 12\) is \(96\).

代码解释

这段代码实现了一个基于状态图(StateGraph)的简单对话系统,该系统利用两个“专家”节点(加法专家和乘法专家)来处理用户的问题。每个专家节点都由一个函数表示,并且可以根据需要将任务传递给另一个专家。代码的主要部分如下:

  1. 模型和工具定义

    • 使用ChatOpenAI模型作为对话系统的底层语言模型。
    • 定义了两个工具函数transfer_to_multiplication_expert和transfer_to_addition_expert,这些工具函数用于在专家之间传递控制权。
  2. 专家函数

    • addition_expert函数处理加法相关的查询,并可以在需要时通过transfer_to_multiplication_expert工具将任务传递给乘法专家。
    • multiplication_expert函数处理乘法相关的查询,并可以在需要时通过transfer_to_addition_expert工具将任务传递给加法专家。
  3. 状态图构建

    • 使用StateGraph构建了一个对话流程图,其中包括加法专家和乘法专家两个节点。
    • 设置对话的起始节点为加法专家。
  4. 状态图的可视化

    • 使用graph.get_graph().draw_mermaid_png()方法生成了状态图的图像表示,并通过IPython.display模块显示这个图像。
  5. 对话处理和输出

    • 定义了pretty_print_messages函数,用于格式化和打印对话中的消息。
    • 使用graph.stream方法开始对话流程,并通过pretty_print_messages函数显示对话的输出。

这段代码的主要功能是创建一个能够处理加法和乘法查询的对话系统。用户提出的问题首先由加法专家处理,如果问题涉及到乘法运算,加法专家会将问题传递给乘法专家。乘法专家完成计算后,将结果返回给用户。这个系统展示了如何使用状态图和专家节点来构建一个能够处理特定领域任务的对话系统。

类似例子

from typing import Literal

@tool
def transfer_to_english_expert():
    """Ask the English expert for help."""
    # NOTE(review): the docstring doubles as the tool description shown to the
    # LLM (originally in Chinese: "向英文专家请求帮助。"). Intentional no-op —
    # invoking the tool is purely the handoff signal.
    return

@tool
def transfer_to_chinese_expert():
    """Ask the Chinese expert for help."""
    # NOTE(review): the docstring doubles as the tool description shown to the
    # LLM (originally in Chinese: "向中文专家请求帮助。"). Intentional no-op.
    return

def chinese_expert(
    state: MessagesState,
) -> Command[Literal["english_expert", "__end__"]]:
    """Chinese-language node: replies in Chinese; when the model emits a tool
    call, hands the conversation off to the English expert."""
    system_prompt = (
        "你只讲中文,遇到英文问题,向英文专家请求帮助"
    )
    prompt_messages = [{"role": "system", "content": system_prompt}, *state["messages"]]
    response = model.bind_tools([transfer_to_english_expert]).invoke(prompt_messages)

    # No tool call: the model answered directly, so the turn ends here.
    if not response.tool_calls:
        return {"messages": [response]}

    # Tool call present: append the tool result message that providers
    # require after an AI tool call, then route to the English expert.
    handoff_ack = {
        "role": "tool",
        "content": "成功移交至英文专家",
        "tool_call_id": response.tool_calls[-1]["id"],
    }
    return Command(goto="english_expert", update={"messages": [response, handoff_ack]})

def english_expert(
    state: MessagesState,
) -> Command[Literal["chinese_expert", "__end__"]]:
    """English-language node: replies in English; a tool call from the model
    routes the conversation back to the Chinese expert."""
    system_prompt = (
        "You are an English expert. You only speak in English. "
    )
    prompt_messages = [{"role": "system", "content": system_prompt}, *state["messages"]]
    response = model.bind_tools([transfer_to_chinese_expert]).invoke(prompt_messages)

    # Direct answer — no handoff requested.
    if not response.tool_calls:
        return {"messages": [response]}

    # Handoff requested: pair the tool call with its required tool result,
    # then route back to the Chinese expert.
    handoff_ack = {
        "role": "tool",
        "content": "Successfully transferred to Chinese expert",
        "tool_call_id": response.tool_calls[-1]["id"],
    }
    return Command(goto="chinese_expert", update={"messages": [response, handoff_ack]})

# Wire the bilingual handoff graph; conversations always start with the
# Chinese expert, which may route to the English expert via tool calls.
builder = StateGraph(MessagesState)
builder.add_node("chinese_expert", chinese_expert)
builder.add_node("english_expert", english_expert)
builder.add_edge(START, "chinese_expert")
graph = builder.compile()

from IPython.display import Image, display

# Render the bilingual graph as an inline Mermaid PNG (notebook-only side effect).
display(Image(graph.get_graph().draw_mermaid_png()))

在这里插入图片描述

# Ask an English question so the Chinese expert is forced to hand off to the
# English expert; pretty-print each node's streamed update.
for chunk in graph.stream(
    {"messages": [("user", "what's deepseek?")]},
):
    pretty_print_messages(chunk)
Update from node chinese_expert:


================================== Ai Message ==================================
Tool Calls:
  transfer_to_english_expert (call_-9024511682785680250)
 Call ID: call_-9024511682785680250
  Args:
================================= Tool Message =================================

成功移交至英文专家


Update from node english_expert:


================================== Ai Message ==================================

DeepSeek is a term that might refer to a variety of concepts depending on the context. Here are a few possible interpretations:

1. **Search Engine Technology**: It could be a metaphorical or branded name for a search engine or a search algorithm designed to delve deeply into data sources to retrieve highly relevant information.

2. **Philosophical or Psychological Concept**: In a more abstract sense, it might refer to the act of seeking deep understanding or truth, often used in philosophical or psychological discussions.

3. **Business or Product Name**: It could be the name of a company, product, or service that specializes in in-depth research, analytics, or data mining.

4. **Technical Term in a Specific Field**: In certain scientific or technical fields, "deepseek" might be a term used to describe a particular method or process.

Without more context, it's challenging to provide a definitive answer. If you have a specific context or industry in mind, please provide more details for a more accurate explanation.
Logo

GitCode 天启AI是一款由 GitCode 团队打造的智能助手,基于先进的LLM(大语言模型)与多智能体 Agent 技术构建,致力于为用户提供高效、智能、多模态的创作与开发支持。它不仅支持自然语言对话,还具备处理文件、生成 PPT、撰写分析报告、开发 Web 应用等多项能力,真正做到“一句话,让 Al帮你完成复杂任务”。

更多推荐