Having Trae Write an AI API Relay Service
The g4f API service on port 1337 is sometimes inconvenient to use directly, so instead I had Trae write a small OpenAI-compatible relay service on top of the g4f Python client.
Code
from fastapi import FastAPI, Request, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from g4f.client import Client
import json
app = FastAPI(title="G4F Local Model Relay Service")

# Allow cross-origin requests
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Initialize the G4F client
client = Client()


@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    try:
        # 1. Parse the OpenAI-format request
        data = await request.json()
        print(f"==== Received request: {data}")

        # Extract the message list
        messages = data.get("messages", [])
        if not messages:
            raise HTTPException(status_code=400, detail="No messages provided")

        # 2. Route the call to the local model through the G4F client
        stream = data.get("stream", False)

        # Build the G4F call parameters
        g4f_params = {
            "messages": messages,
            "temperature": data.get("temperature", 0.7),
            "max_tokens": data.get("max_tokens", 500),
        }

        # Use the requested model if one was given, otherwise let G4F pick its default
        model = data.get("model")
        if model and model != "default":
            g4f_params["model"] = model

        # 3. Call the G4F model
        if stream:
            # Streaming response: re-emit G4F chunks as OpenAI-style SSE events
            async def generate_stream():
                try:
                    response = client.chat.completions.create(**g4f_params, stream=True)
                    for chunk in response:
                        if hasattr(chunk, "choices") and chunk.choices:
                            content = chunk.choices[0].delta.content
                            # Only forward chunks that actually carry text
                            if content is not None:
                                yield f"data: {json.dumps({'choices': [{'delta': {'content': content}}]})}\n\n"
                    # Send the [DONE] signal when the stream ends
                    yield "data: [DONE]\n\n"
                except Exception as e:
                    yield f"data: {json.dumps({'error': str(e)})}\n\n"

            return StreamingResponse(
                generate_stream(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                },
            )
        else:
            # Non-streaming response
            response = client.chat.completions.create(**g4f_params)
            # Convert to the OpenAI-compatible format
            return {
                "id": "chatcmpl-g4f-local",
                "object": "chat.completion",
                "created": 0,
                "model": model or "g4f-default",
                "choices": [{
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": response.choices[0].message.content,
                    },
                    "finish_reason": "stop",
                }],
                "usage": {
                    "prompt_tokens": 0,
                    "completion_tokens": 0,
                    "total_tokens": 0,
                },
            }
    except HTTPException:
        # Let deliberate HTTP errors (like the 400 above) pass through unchanged
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@app.get("/")
async def root():
    return {
        "message": "星河社区 LLM relay service",
        "status": "running",
        "endpoint": "/v1/chat/completions",
    }


@app.get("/health")
async def health_check():
    return {"status": "healthy"}


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
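
Once the relay is running, both paths can be smoke-tested from any HTTP client. Below is a minimal sketch using the requests library; it assumes the service is listening on localhost:8000, and the model name "gpt-4o-mini" is only an illustrative placeholder for whatever models your g4f install supports.

import json
import requests

BASE = "http://localhost:8000"

# Non-streaming: the relay answers with a single OpenAI-style JSON body
resp = requests.post(f"{BASE}/v1/chat/completions", json={
    "model": "gpt-4o-mini",  # illustrative; substitute any g4f-supported model
    "messages": [{"role": "user", "content": "Say hello in one sentence."}],
})
print(resp.json()["choices"][0]["message"]["content"])

# Streaming: read the SSE lines until the [DONE] sentinel
with requests.post(f"{BASE}/v1/chat/completions", json={
    "model": "gpt-4o-mini",
    "messages": [{"role": "user", "content": "Count from 1 to 5."}],
    "stream": True,
}, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue  # skip blank separator lines between events
        payload = line[len("data: "):]
        if payload == "[DONE]":
            break
        chunk = json.loads(payload)
        delta = chunk.get("choices", [{}])[0].get("delta", {}).get("content")
        if delta:
            print(delta, end="", flush=True)

Because the relay keeps the /v1/chat/completions path and response shape, the same test also works with any OpenAI-compatible SDK pointed at http://localhost:8000/v1.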