import os
from typing import Annotated

from langchain_openai import ChatOpenAI
from typing_extensions import TypedDict

from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages

# LangSmith tracing configuration.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "LangchainDemo4"
# SECURITY NOTE(review): never hard-code real API keys in source — load them
# from the environment or a secrets manager instead.
os.environ["LANGCHAIN_API_KEY"] = "***"


class State(TypedDict):
    # Conversation history. `add_messages` makes updates APPEND new messages
    # to the list instead of overwriting it, so the full history accumulates.
    messages: Annotated[list, add_messages]


# Create the graph builder from the state schema.
graph_builder = StateGraph(State)

llm = ChatOpenAI(
    api_key='****',  # SECURITY NOTE(review): placeholder — supply via env var
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    model="qwen-plus-2025-04-28",
)


def chatbot(state: State):
    """Graph node: send the accumulated messages to the LLM and append its reply.

    Returns a partial state update; `add_messages` merges the new message
    into `state["messages"]` rather than replacing the list.
    """
    return {"messages": [llm.invoke(state["messages"])]}
# Register the node: the first argument is the unique node name, the second
# is the callable invoked whenever the node runs.
graph_builder.add_node("chatbot", chatbot)
# Wire the graph: START -> chatbot -> END.
graph_builder.add_edge(START,"chatbot")
graph_builder.add_edge("chatbot", END)
# Compile the builder into a runnable graph.
graph = graph_builder.compile()


def stream_graph_updates(user_input: str) -> None:
    """Stream graph events for one user turn and print the assistant reply.

    Each streamed event maps node names to their state updates; the last
    entry of `value["messages"]` is the newest (assistant) message.
    """
    for event in graph.stream(
        {"messages": [{"role": "user", "content": user_input}]}
    ):
        print(event)  # raw event emitted by the chatbot node
        for value in event.values():
            print("Assistant:", value["messages"][-1].content)


def main() -> None:
    """Interactive REPL: keep asking until the user types quit/exit/q."""
    while True:
        # Narrowed from a bare `except:` (which would also swallow SystemExit).
        # KeyboardInterrupt is included deliberately to preserve the original
        # fallback-and-stop behavior when input() is interrupted/unavailable.
        try:
            user_input = input("User: ")
            if user_input.lower() in ["quit", "exit", "q"]:
                print("Goodbye!")
                break
            stream_graph_updates(user_input)
        except (Exception, KeyboardInterrupt):
            # Fallback if input() is not available: ask one canned question,
            # then stop.
            user_input = "What do you know about LangGraph?"
            print("User: " + user_input)
            stream_graph_updates(user_input)
            break


if __name__ == "__main__":
    # Guarded entry point so importing this module no longer starts the REPL.
    main()