
Building an AI Agent Platform in Python: A Hands-On Tutorial
Today I'd like to share a fun project: building an AI agent platform in Python. The platform lets an AI think and act on its own, a bit like Iron Man's JARVIS. We will use the OpenAI API, but without heavyweight third-party frameworks such as LangChain; plain Python is all we need. Let's get started!
## 1. Project Setup
First, prepare the following environment:
- Python 3.7+
- An OpenAI API key
- A code editor (VS Code is recommended)
Create a new Python virtual environment and install the required packages:
```bash
# Create a virtual environment
python -m venv agent_env
source agent_env/bin/activate   # Linux/Mac
# On Windows use: .\agent_env\Scripts\activate

# Install the required packages
pip install openai python-dotenv
```
## 2. Project Structure
We need to create three core files, plus a `.env` file for the API key:
```
project/
├── main.py      # main program
├── actions.py   # action functions
├── prompts.py   # prompt templates
└── .env         # environment variables
```
Put your OpenAI API key in `.env`:
```
OPENAI_API_KEY=sk-your-api-key-here
```
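Before going further, it can help to verify that the key actually loads. A minimal sanity check (the `check_env.py` filename is just a suggestion; it assumes `.env` sits in the working directory) might look like this:
```python
# check_env.py -- quick sanity check that the API key is picked up
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current directory

key = os.getenv("OPENAI_API_KEY")
if key:
    print(f"API key loaded (starts with {key[:6]}...)")
else:
    print("OPENAI_API_KEY not found -- check your .env file")
```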
## 3. Defining Actions in actions.py
First, define the operations the agent can perform in actions.py:
```python
def get_website_info(url):
    """Simulate fetching basic information about a website."""
    info = {
        "google.com": {"response_time": 0.3, "status": "online"},
        "github.com": {"response_time": 0.5, "status": "online"}
    }
    return info.get(url, {"response_time": 1.0, "status": "unknown"})

def search_knowledge(query):
    """Simulate a knowledge-base lookup."""
    knowledge_base = {
        "python": "Python is a high-level programming language",
        "openai": "OpenAI is an AI research company"
    }
    return knowledge_base.get(query, "No relevant information found")
```
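These are stand-ins for real integrations, but you can already call them directly to see the shape of the data the agent will work with:
```python
# Quick check of the mock actions
from actions import get_website_info, search_knowledge

print(get_website_info("google.com"))   # {'response_time': 0.3, 'status': 'online'}
print(get_website_info("example.com"))  # {'response_time': 1.0, 'status': 'unknown'}
print(search_knowledge("python"))       # 'Python is a high-level programming language'
```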
## 4. The ReAct Prompt in prompts.py
Set up the ReAct-style prompt template in prompts.py:
```python
SYSTEM_PROMPT = """You are an intelligent AI assistant that runs in a Thought -> Action -> Response loop.

Available actions:
1. get_website_info: fetch information about a website
2. search_knowledge: search the knowledge base

Example format:
Thought: I need to check the status of a website
Action: {"name": "get_website_info", "args": {"url": "google.com"}}
Response: answer based on the information received

After each Action, say "PAUSE" and wait for the result.
"""
```
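The loop in main.py will rely on pulling the Action JSON out of the model's reply. A small, hypothetical helper illustrating that parsing (it assumes replies follow the format above) could look like this:
```python
import json

def extract_action(message: str):
    """Extract the Action JSON from a reply that follows the ReAct format above.

    Returns a dict like {"name": ..., "args": {...}}, or None if no Action is present.
    """
    if "Action:" not in message:
        return None
    action_text = message.split("Action:")[1].split("PAUSE")[0].strip()
    return json.loads(action_text)

# Example with a reply shaped like the prompt's own example
reply = (
    "Thought: I need to check the status of a website\n"
    'Action: {"name": "get_website_info", "args": {"url": "google.com"}}\n'
    "PAUSE"
)
print(extract_action(reply))  # {'name': 'get_website_info', 'args': {'url': 'google.com'}}
```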
## 5. The Agent Loop in main.py
Implement the agent's core logic in main.py:
```python
from openai import OpenAI
import json
import os
from dotenv import load_dotenv
from actions import *
from prompts import SYSTEM_PROMPT

load_dotenv()
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

def run_conversation(user_input):
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": user_input}
    ]
    for _ in range(5):  # at most 5 rounds
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages
        )
        assistant_message = response.choices[0].message.content
        if "PAUSE" not in assistant_message:
            return assistant_message
        # Parse the Action and execute it
        try:
            action = json.loads(assistant_message.split("Action: ")[1].split("PAUSE")[0])
            if action["name"] == "get_website_info":
                result = get_website_info(**action["args"])
            elif action["name"] == "search_knowledge":
                result = search_knowledge(**action["args"])
            else:
                result = f"Unknown action: {action['name']}"
            messages.append({"role": "assistant", "content": assistant_message})
            messages.append({"role": "user", "content": f"Action result: {result}"})
        except Exception as e:
            messages.append({"role": "user", "content": f"Error: {str(e)}"})
    return "Sorry, I was unable to complete this task."

# Test run
if __name__ == "__main__":
    test_input = "What is the response time of google.com?"
    print(run_conversation(test_input))
```
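As you add more actions, the if/elif dispatch gets unwieldy. One optional refactor, purely a sketch and not part of the original code, is to look the handler up in a dict:
```python
# Optional refactor: dispatch actions via a dict instead of if/elif
ACTION_HANDLERS = {
    "get_website_info": get_website_info,
    "search_knowledge": search_knowledge,
}

def execute_action(action):
    """Run the named action, or report that it is unknown."""
    handler = ACTION_HANDLERS.get(action["name"])
    if handler is None:
        return f"Unknown action: {action['name']}"
    return handler(**action["args"])
```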
## 6. Adding a Memory Module
To let the agent remember conversation history, create a new memory.py:
```python
class ConversationMemory:
    def __init__(self, max_tokens=1000):
        self.conversations = []
        self.max_tokens = max_tokens

    def add_memory(self, role, content):
        self.conversations.append({
            "role": role,
            "content": content
        })
        # Simple memory management: drop the oldest entries once we exceed the limit
        while self._estimate_tokens() > self.max_tokens:
            self.conversations.pop(0)

    def _estimate_tokens(self):
        # Rough token estimate (about 1.3 tokens per word)
        return sum(len(conv["content"].split()) * 1.3 for conv in self.conversations)

    def get_relevant_memory(self, query):
        # Naive relevance search: keep entries that contain any word from the query
        relevant = []
        for conv in self.conversations:
            if any(word in conv["content"].lower()
                   for word in query.lower().split()):
                relevant.append(conv)
        return relevant
```
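A quick check shows how the memory behaves; the sample sentences are just illustrative:
```python
from memory import ConversationMemory

memory = ConversationMemory(max_tokens=1000)
memory.add_memory("user", "What kind of language is Python?")
memory.add_memory("assistant", "Python is a high-level programming language")

# Returns the stored entries that contain any word from the query
print(memory.get_relevant_memory("python features"))
```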
## 7. Helper Utilities in utils.py
Create utils.py with some helper functionality:
```python
import logging
import time
from functools import wraps

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    filename='agent.log'
)

def retry_on_error(max_retries=3, delay=1):
    """Decorator that retries a function when it raises an exception."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for i in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    logging.error(f"Attempt {i+1}/{max_retries} failed: {str(e)}")
                    if i < max_retries - 1:
                        time.sleep(delay)
            raise Exception(f"Failed after {max_retries} attempts")
        return wrapper
    return decorator
```
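As an illustration, the decorator can wrap any function that may fail transiently; the `flaky_call` function below is a made-up example, not part of the project:
```python
import random
from utils import retry_on_error

@retry_on_error(max_retries=3, delay=1)
def flaky_call():
    """A made-up function that fails roughly half the time."""
    if random.random() < 0.5:
        raise RuntimeError("temporary failure")
    return "ok"

print(flaky_call())  # retried up to 3 times; raises if it never succeeds
```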
## 8. Extending actions.py
Extend actions.py with more capabilities (this version also needs `pip install pandas requests beautifulsoup4`):
```python
import pandas as pd
import requests
from bs4 import BeautifulSoup
from utils import retry_on_error

class AgentActions:
    @retry_on_error(max_retries=3)
    def web_search(self, query):
        """Simulate a web search."""
        # In a real project, plug in a search API here
        return f"Search results: {query}"

    def analyze_data(self, data_str):
        """Simple data analysis."""
        try:
            # Convert the JSON string into a DataFrame
            data = pd.read_json(data_str)
            analysis = {
                "rows": len(data),
                "columns": list(data.columns),
                "summary": data.describe().to_dict()
            }
            return analysis
        except Exception as e:
            return f"Data analysis error: {str(e)}"

    @retry_on_error()
    def fetch_webpage(self, url):
        """Fetch and summarize a web page."""
        response = requests.get(url)
        soup = BeautifulSoup(response.text, 'html.parser')
        return {
            "title": soup.title.string if soup.title else "No title",
            "text": soup.get_text()[:500]  # only return the first 500 characters
        }
```
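For example, analyze_data accepts a JSON string and returns a small summary; the sample data here is made up:
```python
from actions import AgentActions

actions = AgentActions()

sample = '[{"price": 10, "qty": 2}, {"price": 15, "qty": 1}, {"price": 20, "qty": 4}]'
result = actions.analyze_data(sample)
print(result["rows"])     # 3
print(result["columns"])  # ['price', 'qty']
```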
## 9. Updating main.py
Update main.py to use the new components:
```python
import os
import logging
from openai import OpenAI
from dotenv import load_dotenv
from memory import ConversationMemory
from actions import AgentActions
from prompts import SYSTEM_PROMPT
from utils import retry_on_error

load_dotenv()

class Agent:
    def __init__(self):
        self.memory = ConversationMemory()
        self.actions = AgentActions()
        self.client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

    @retry_on_error()
    def get_response(self, user_input):
        # Fetch relevant memories
        relevant_memory = self.memory.get_relevant_memory(user_input)
        # Build the message list
        messages = [
            {"role": "system", "content": SYSTEM_PROMPT},
            *relevant_memory,
            {"role": "user", "content": user_input}
        ]
        try:
            response = self.client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=messages
            )
            response_content = response.choices[0].message.content
            # Store both sides of the exchange so follow-up questions have context
            self.memory.add_memory("user", user_input)
            self.memory.add_memory("assistant", response_content)
            return response_content
        except Exception as e:
            logging.error(f"Failed to get a response: {str(e)}")
            return "Sorry, I ran into a problem. Please try again later."

# Usage example
if __name__ == "__main__":
    agent = Agent()
    # Test conversation
    conversations = [
        "What kind of programming language is Python?",
        "What are its main features?",
        "Show me a simple Python code example"
    ]
    for conv in conversations:
        print(f"\nUser: {conv}")
        response = agent.get_response(conv)
        print(f"AI: {response}")
```
To wrap up, try extending the agent with more features of your own.
This article is reposted from the WeChat official account @楼谈.