
Getting Started with Agents

A Hands-On Example of a Simple Agent

Code Overview

  • Goal: build an AI agent that can reason on its own (think) and call tools (act) in order to answer questions that need real-time information (e.g. "What is the latest research progress on agents?").

  • Core components

    • LLM (gpt-4o-mini in the code below): handles reasoning and decision-making.

    • SerpAPI (search engine): used to look up external information when the LLM lacks the knowledge.

    • ReAct framework: makes the agent loop through "Thought → Action → Observation → refine" (a minimal conceptual sketch of this loop follows the list).
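
To make the "Thought → Action → Observation" cycle concrete before wiring it up with LangChain, here is a minimal conceptual sketch. The helpers llm_step and run_tool are hypothetical, not LangChain APIs; the AgentExecutor used later implements this loop for real.

# Minimal conceptual sketch of a ReAct loop (illustrative only).
# `llm_step` and `run_tool` are hypothetical callables, not LangChain APIs.
def react_loop(question, llm_step, run_tool, max_steps=5):
    scratchpad = ""  # accumulates the Thought/Action/Observation transcript
    for _ in range(max_steps):
        # Think: the LLM reads the question plus the scratchpad and decides what to do next
        step = llm_step(question, scratchpad)
        if step["type"] == "final_answer":
            return step["content"]
        # Act: call the chosen tool (e.g. a search tool) with the input the LLM produced
        observation = run_tool(step["tool"], step["tool_input"])
        # Observe: record the result so the next thought can build on it
        scratchpad += f"\nAction: {step['tool']}\nAction Input: {step['tool_input']}\nObservation: {observation}"
    return "No final answer within the step limit."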

from dotenv import load_dotenv
load_dotenv()  # load_dotenv() loads the environment variables defined in the .env file (e.g. the OpenAI and SerpAPI API keys)

from langchain import hub
from langchain_openai import ChatOpenAI
from langchain_community.utilities import SerpAPIWrapper
from langchain_core.tools import Tool
from langchain.agents import create_react_agent, AgentExecutor

# Initialize prompt
prompt = hub.pull("hwchase17/react")
print(prompt)   # Pulls the predefined ReAct prompt template (hwchase17/react) from the LangChain Hub; it contains the guiding text for thoughts, tool calls, and the final answer.

# Initialize LLM - using ChatOpenAI instead of OpenAI for better compatibility
llm = ChatOpenAI(
    model="gpt-4o-mini",  # Use a known working model
    temperature=0
)

# Initialize search tool
search = SerpAPIWrapper()
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="Useful for searching external information when the model lacks the relevant knowledge"
    ),
]

# Create agent
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# Run the same query twice through the AgentExecutor and compare the results for consistency.
# verbose=True prints detailed logs of the tool calls and LLM outputs, which helps with debugging.

# Run queries
print("第一次运行的结果:")
try:
    result1 = agent_executor.invoke({"input": "What is the latest research progress on AI agents?"})
    print(result1)
except Exception as e:
    print(f"Error in first query: {e}")

print("第二次运行的结果:")
try:
    result2 = agent_executor.invoke({"input": "What is the latest research progress on AI agents?"})
    print(result2)
except Exception as e:
    print(f"Error in second query: {e}")




Obtain and configure the required API keys yourself; a quick check that they were actually loaded is sketched after the .env example below.

.env file

OPENAI_API_KEY="sk-xxx"
OPENAI_API_BASE="https:xxx"
SERPAPI_API_KEY="1abd77afxxxxx"
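
Before running the agent, it helps to confirm that load_dotenv() actually picked these keys up. A minimal sketch, assuming the .env file above sits in the working directory; the masking is only to avoid echoing full keys:

# Quick sanity check that the .env keys were loaded (sketch).
import os
from dotenv import load_dotenv

load_dotenv()  # by default, reads .env from the current working directory

for key in ("OPENAI_API_KEY", "OPENAI_API_BASE", "SERPAPI_API_KEY"):
    value = os.getenv(key)
    # print only a short masked prefix so the real key is never shown in full
    print(key, "->", (value[:6] + "...") if value else "MISSING")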

Version with memory (the main changes are a custom ReAct prompt with a {chat_history} slot and a ConversationBufferMemory attached to the AgentExecutor):

import sys
from dotenv import load_dotenv
load_dotenv()

from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from langchain_community.utilities import SerpAPIWrapper
from langchain_core.tools import Tool
from langchain.agents import create_react_agent, AgentExecutor
from langchain.memory import ConversationBufferMemory

# Initialize LLM 
llm = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0
)

# Enhanced search tool
search = SerpAPIWrapper()
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="Useful for searching latest AI research."
    ),
]

# Standard ReAct template (English)
react_template = """Answer the following questions as best you can. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Conversation History:
{chat_history}

Begin!

Question: {input}
Thought:{agent_scratchpad}"""

custom_prompt = PromptTemplate.from_template(react_template)

# Create agent with memory
agent = create_react_agent(llm, tools, custom_prompt)
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=False,  # the string PromptTemplate above expects chat_history as plain text
    input_key="input",
    output_key="output"
)

agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    memory=memory,
    verbose=True,
    handle_parsing_errors=True,
    max_iterations=3,
    return_intermediate_steps=False
)

def clean_output(output):
    """Extract final answer if exists"""
    if "Final Answer:" in output:
        return output.split("Final Answer:")[-1].strip()
    return output

# First query
print("\n🔍 Query 1:")
result1 = agent_executor.invoke({"input": "What are the top 3 AI Agent research breakthroughs"})
answer1 = clean_output(result1["output"])
print("\n💡 Answer:", answer1)

# Second query (with memory)
print("\n🔍 Query 2 (with memory):")
result2 = agent_executor.invoke({
    "input": "just list these 3 breakthroughs briefly",
    # Explicit memory injection
    "chat_history": [
        ("human", result1["input"]),
        ("ai", answer1)
    ]
})
print("\n💡 Answer:", clean_output(result2["output"]))

# Debug: Show full memory
print("\n🧠 Current Memory:")
print(memory.load_memory_variables({}))

Testing the chat API endpoint

from openai import OpenAI

client = OpenAI(
  api_key="sk-xxx28aD5",
  base_url = "https://xxxx"
)

completion = client.chat.completions.create(
  model="gpt-4o-mini",
  store=True,
  messages=[
    {"role": "user", "content": "write a haiku about ai"}
  ]
)

print(completion.choices[0].message)

Testing the image model

from openai import OpenAI
import os
from dotenv import load_dotenv
import requests

# Load environment variables
load_dotenv()

# Initialize the client (the key could instead be read automatically from the OPENAI_API_KEY environment variable)
client = OpenAI(
    api_key="sk-xxx",
    base_url="https://xxxxv1")

try:
    response = client.images.generate(
        model="dall-e-3",  # an image-generation (DALL·E) model is required here
        prompt="E-commerce birthday gift promotional poster",
        size="1024x1024",
        quality="standard",
        n=1,
    )

    # Safely retrieve the URL
    if response.data and len(response.data) > 0:
        image_url = response.data[0].url
        print("Image generated successfully, URL:", image_url)

        # Download the image
        image_data = requests.get(image_url).content
        with open("flower_poster.png", "wb") as f:
            f.write(image_data)
        print("Image saved as flower_poster.png")
    else:
        print("Error: no valid image URL was returned")

except Exception as e:
    print(f"An error occurred: {e}")

Reference book: 大模型应用开发动手做AI Agent
