FastGPT Source Code Analysis: Agent LLM Integration Interfaces and Usage
Core Code Analysis of FastGPT's LLM Integration
Core Architecture Overview
FastGPT is built around a unified model-abstraction layer: a set of standardized interfaces connects to many different large models and covers LLM, Embedding, ReRank, TTS, and STT capabilities.
Model capabilities are fully configurable, covering local Ollama deployments as well as API access to the major AI cloud vendors. The same layer backs knowledge-base chunking, ranking, and vectorization, plus Agent conversation and business-logic handling, providing the foundation for FastGPT's visual Agent configuration.
The core files live mainly under the packages/global/core/ai/ directory: model.ts, model.d.ts, config.ts, index.ts, ReRank.ts, and so on.
1. Model Management Architecture
Global Model Maps
```typescript
// Global model registry
declare global {
  var llmModelMap: Map<string, LLMModelItemType>;
  var embeddingModelMap: Map<string, EmbeddingModelItemType>;
  var reRankModelMap: Map<string, ReRankModelItemType>;
  var ttsModelMap: Map<string, TTSModelType>;
  var sttModelMap: Map<string, STTModelType>;
  var systemDefaultModel: {
    llm: LLMModelItemType;
    embedding: EmbeddingModelItemType;
    rerank?: ReRankModelItemType;
    tts?: TTSModelType;
    stt?: STTModelType;
  };
}
```
Model Retrieval Interface
```typescript
// Unified model lookup helpers
export const getLLMModel = (model?: string) => {
  if (!model) return getDefaultLLMModel();
  return global.llmModelMap.get(model) || getDefaultLLMModel();
};

export const getEmbeddingModel = (model?: string) => {
  if (!model) return getDefaultEmbeddingModel();
  return global.embeddingModelMap.get(model) || getDefaultEmbeddingModel();
};

export const getReRankModel = (model?: string) => {
  if (!model) return getDefaultRerankModel();
  return global.reRankModelMap.get(model) || getDefaultRerankModel();
};

// Generic lookup across all model types
export const findAIModel = (model: string) => {
  return (
    global.llmModelMap.get(model) ||
    global.embeddingModelMap.get(model) ||
    global.ttsModelMap.get(model) ||
    global.sttModelMap.get(model) ||
    global.reRankModelMap.get(model)
  );
};
```
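A minimal usage sketch of these helpers. The import path and the model names are illustrative assumptions, not taken from the repository; the point is the fallback-to-default behavior and the type-agnostic lookup.

```typescript
// Import path assumed for illustration only
import { getLLMModel, findAIModel } from '@fastgpt/global/core/ai/model';

// Returns the default LLM if the requested name is not registered
const chatModel = getLLMModel('gpt-4o-mini');
console.log(chatModel.maxContext, chatModel.maxResponse);

// Generic lookup when the model type is not known in advance
const anyModel = findAIModel('text-embedding-3-small');
if (!anyModel) {
  throw new Error('Model is not registered in any of the global maps');
}
```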
2. LLM Model Integration
Model Configuration Structure
```typescript
export type LLMModelItemType = {
  provider: ModelProviderIdType; // Provider: OpenAI / Claude / GLM, etc.
  model: string;                 // Model identifier
  name: string;                  // Display name

  // Capability limits
  maxContext: number;      // Maximum context length
  maxResponse: number;     // Maximum response length
  quoteMaxToken: number;   // Maximum quoted tokens
  maxTemperature?: number; // Maximum temperature

  // Feature flags
  vision?: boolean;     // Vision capability
  reasoning?: boolean;  // Reasoning capability
  functionCall: boolean; // Function calling
  toolChoice: boolean;   // Tool choice

  // Specialized usage
  datasetProcess?: boolean;      // Knowledge-base processing
  usedInClassify?: boolean;      // Question classification
  usedInExtractFields?: boolean; // Content extraction
  usedInToolCall?: boolean;      // Tool calling

  // Custom configuration
  defaultSystemChatPrompt?: string;    // Default system prompt
  defaultConfig?: Record<string, any>; // Default request config
  fieldMap?: Record<string, string>;   // Field mapping

  // Direct-connection configuration
  requestUrl?: string;  // Custom request URL
  requestAuth?: string; // Custom authorization
};
```
Unified API Client
```typescript
export const getAIApi = (props?: { userKey?: OpenaiAccountType; timeout?: number }) => {
  const { userKey, timeout } = props || {};

  // Priority: user config > global config > environment variables
  const baseUrl = userKey?.baseUrl || global?.systemEnv?.oneapiUrl || openaiBaseUrl;
  const apiKey = userKey?.key || global?.systemEnv?.chatApiKey || openaiBaseKey;

  return new OpenAI({
    baseURL: baseUrl,
    apiKey,
    httpAgent: global.httpsAgent,
    timeout,
    maxRetries: 2
  });
};
```
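A quick sketch of how a caller-supplied key overrides the global configuration. The credentials below are placeholders; the returned object is a standard OpenAI SDK client, so any SDK method can be used on it.

```typescript
// Build a client with a per-user key that takes precedence over global config
const ai = getAIApi({
  userKey: { baseUrl: 'https://api.openai.com/v1', key: 'sk-xxxx' }, // placeholder credentials
  timeout: 60000
});

// Standard OpenAI SDK call on the returned client
const models = await ai.models.list();
console.log(models.data.map((m) => m.id));
```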
Chat Completion Interface
```typescript
export const createChatCompletion = async ({ body, userKey, timeout, options }) => {
  const modelConstantsData = getLLMModel(body.model);
  const ai = getAIApi({ userKey, timeout });

  // Support a custom request path and authorization per model
  const response = await ai.chat.completions.create(body, {
    ...options,
    ...(modelConstantsData.requestUrl ? { path: modelConstantsData.requestUrl } : {}),
    headers: {
      ...options?.headers,
      ...(modelConstantsData.requestAuth
        ? { Authorization: `Bearer ${modelConstantsData.requestAuth}` }
        : {})
    }
  });

  // Detect whether the response is a stream
  const isStreamResponse =
    typeof response === 'object' &&
    response !== null &&
    ('iterator' in response || 'controller' in response);

  return { response, isStreamResponse };
};
```
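A hypothetical caller that handles both branches of the returned `isStreamResponse` flag. It assumes the OpenAI SDK's streaming response, which is async-iterable; the model name and prompt are placeholders.

```typescript
const { response, isStreamResponse } = await createChatCompletion({
  body: {
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: 'Hello' }],
    stream: true,
    temperature: 0.7
  }
});

if (isStreamResponse) {
  // Streamed responses yield delta chunks
  for await (const chunk of response) {
    process.stdout.write(chunk.choices[0]?.delta?.content || '');
  }
} else {
  console.log(response.choices[0].message.content);
}
```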
3. Embedding Model Integration
Embedding Configuration
```typescript
export type EmbeddingModelItemType = {
  provider: ModelProviderIdType;
  model: string;
  name: string;

  // Token configuration
  defaultToken: number; // Default chunk token size
  maxToken: number;     // Maximum tokens
  weight: number;       // Training weight

  // Processing options
  normalization?: boolean; // Apply normalization
  hidden?: boolean;        // Hidden from selection

  // Custom configuration
  defaultConfig?: Record<string, any>; // Shared config
  dbConfig?: Record<string, any>;      // Config for indexing/storage
  queryConfig?: Record<string, any>;   // Config for querying
};
```
Vectorization Implementation
```typescript
export async function getVectorsByText({ model, input, type }: GetVectorProps) {
  const ai = getAIApi();

  // Pick the config that matches the embedding usage type
  const config = {
    ...model.defaultConfig,
    ...(type === EmbeddingTypeEnm.db && model.dbConfig),
    ...(type === EmbeddingTypeEnm.query && model.queryConfig),
    model: model.model,
    input: [input]
  };

  const result = await ai.embeddings.create(
    config,
    model.requestUrl
      ? {
          path: model.requestUrl,
          headers: model.requestAuth
            ? { Authorization: `Bearer ${model.requestAuth}` }
            : undefined
        }
      : {}
  );

  // Post-process the returned vectors
  const vectors = await Promise.all(
    result.data
      .map((item) => unityDimensional(item.embedding)) // Unify dimensions
      .map((item) => {
        if (model.normalization) return normalization(item); // Normalize
        return item;
      })
  );

  return { tokens: await countPromptTokens(input), vectors };
}

// Unify vector dimension to 1536
function unityDimensional(vector: number[]) {
  if (vector.length > 1536) {
    return vector.slice(0, 1536);
  }
  const zeroVector = new Array(1536 - vector.length).fill(0);
  return vector.concat(zeroVector);
}

// L2 normalization
function normalization(vector: number[]) {
  if (vector.some((item) => item > 1)) {
    const norm = Math.sqrt(vector.reduce((sum, val) => sum + val * val, 0));
    return vector.map((val) => val / norm);
  }
  return vector;
}
```
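A minimal usage sketch, assuming the helpers above are in scope and that the named embedding model is configured. The input text is a placeholder; after `unityDimensional` every returned vector has length 1536.

```typescript
// Embed a query string with the query-side configuration
const embeddingModel = getEmbeddingModel('text-embedding-3-small');

const { tokens, vectors } = await getVectorsByText({
  model: embeddingModel,
  input: 'What is a vector database?',
  type: EmbeddingTypeEnm.query
});

console.log(`consumed ${tokens} tokens, vector dimension: ${vectors[0].length}`); // 1536
```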
4. ReRank Model Integration
ReRank Implementation
```typescript
export function reRankRecall({
  model = getDefaultRerankModel(),
  query,
  documents
}: {
  model?: ReRankModelItemType;
  query: string;
  documents: { id: string; text: string }[];
}): Promise<ReRankCallResult> {
  const { baseUrl, authorization } = getAxiosConfig();

  return POST<PostReRankResponse>(
    model.requestUrl ? model.requestUrl : `${baseUrl}/rerank`,
    {
      model: model.model,
      query,
      documents: documents.map((doc) => doc.text)
    },
    {
      headers: {
        Authorization: model.requestAuth ? `Bearer ${model.requestAuth}` : authorization
      },
      timeout: 30000
    }
  ).then((data) => {
    return data?.results?.map((item) => ({
      id: documents[item.index].id,
      score: item.relevance_score
    }));
  });
}
```
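A hypothetical retrieval step that re-ranks candidate chunks against the user query. The chunk contents and ids are placeholders; the function returns `{ id, score }` pairs that the caller sorts or filters as needed.

```typescript
const results = await reRankRecall({
  query: 'How does FastGPT connect to a local model?',
  documents: [
    { id: 'chunk-1', text: 'Local models can be accessed through OneAPI or Ollama...' },
    { id: 'chunk-2', text: 'Knowledge-base chunking and vectorization workflow...' }
  ]
});

// Sort by relevance score, highest first
const topIds = (results || [])
  .sort((a, b) => b.score - a.score)
  .map((item) => item.id);
```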
5. Model Configuration Management
Configuration File Structure
{"llmModels": [{"provider": "OpenAI","model": "gpt-4o-mini","name": "gpt-4o-mini","maxContext": 128000,"maxResponse": 16000,"quoteMaxToken": 120000,"vision": true,"datasetProcess": true,"usedInClassify": true,"usedInExtractFields": true,"usedInToolCall": true,"toolChoice": true,"functionCall": false,"defaultConfig": {},"fieldMap": {}}],"vectorModels": [{"provider": "OpenAI","model": "text-embedding-3-small","name": "text-embedding-3-small","defaultToken": 512,"maxToken": 3000,"weight": 100}],"reRankModels": [],"audioSpeechModels": [{"provider": "OpenAI","model": "tts-1","name": "OpenAI TTS1","voices": [{ "label": "Alloy", "value": "alloy" },{ "label": "Echo", "value": "echo" }]}],"whisperModel": {"provider": "OpenAI","model": "whisper-1","name": "Whisper1"}
}
Dynamic Model Management
```typescript
// Update a model configuration at runtime
export const updateModelConfig = async (modelData: any) => {
  // Validate the model configuration
  const validatedModel = validateModelConfig(modelData);

  // Update the global map
  if (validatedModel.type === 'llm') {
    global.llmModelMap.set(validatedModel.model, validatedModel);
  } else if (validatedModel.type === 'embedding') {
    global.embeddingModelMap.set(validatedModel.model, validatedModel);
  }

  // Persist the configuration
  await saveModelConfig(validatedModel);
};

// Smoke-test a model configuration
export const testModel = async (modelConfig: any) => {
  try {
    if (modelConfig.type === 'llm') {
      const response = await createChatCompletion({
        body: {
          model: modelConfig.model,
          messages: [{ role: 'user', content: 'Hello' }],
          max_tokens: 10
        }
      });
      return { success: true, response };
    }
    if (modelConfig.type === 'embedding') {
      const result = await getVectorsByText({
        model: modelConfig,
        input: 'test text'
      });
      return { success: true, vectors: result.vectors };
    }
  } catch (error) {
    return { success: false, error: error.message };
  }
};
```
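A hypothetical admin flow combining the two functions above: test a candidate entry first, register it only if the test passes. The candidate object is illustrative and omits fields a real configuration would carry.

```typescript
const candidate = {
  type: 'llm',
  provider: 'DeepSeek',
  model: 'deepseek-chat',
  name: 'DeepSeek Chat',
  maxContext: 64000,
  maxResponse: 8000
};

const test = await testModel(candidate);
if (test.success) {
  await updateModelConfig(candidate); // writes to the global map and persists
} else {
  console.error('Model test failed:', test.error);
}
```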
6. Multi-Provider Support
Provider Adaptation
```typescript
// Supported model providers
export enum ModelProviderIdType {
  OpenAI = 'OpenAI',
  Anthropic = 'Anthropic',
  Google = 'Google',
  Baidu = 'Baidu',
  ByteDance = 'ByteDance',
  Moonshot = 'Moonshot',
  DeepSeek = 'DeepSeek',
  Other = 'Other'
}

// Provider-specific adaptation (sketch)
const providerAdapters = {
  [ModelProviderIdType.OpenAI]: {
    formatRequest: (body) => body,
    formatResponse: (response) => response
  },
  [ModelProviderIdType.Anthropic]: {
    formatRequest: (body) => ({
      ...body
      // Claude-specific request conversion
    }),
    formatResponse: (response) => ({
      // Normalize the response format
    })
  }
};
```
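A minimal dispatch sketch showing how such adapters could wrap a call: pick the adapter for the model's provider, convert the request, call the unified API, and convert the response back. The function name and fallback-to-OpenAI choice are assumptions layered on the sketch above, not FastGPT's actual dispatch code.

```typescript
// Hypothetical helper: route a request through the provider adapter
const callWithAdapter = async (model: LLMModelItemType, body: any) => {
  const adapter =
    providerAdapters[model.provider] || providerAdapters[ModelProviderIdType.OpenAI];

  const request = adapter.formatRequest(body);
  const { response } = await createChatCompletion({ body: request });
  return adapter.formatResponse(response);
};
```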
Field Mapping
```typescript
// Handle field-name differences between models
const applyFieldMapping = (body: any, fieldMap: Record<string, string>) => {
  const mappedBody = { ...body };
  Object.entries(fieldMap).forEach(([from, to]) => {
    if (mappedBody[from] !== undefined) {
      mappedBody[to] = mappedBody[from];
      delete mappedBody[from];
    }
  });
  return mappedBody;
};

// Example: field mapping for o1 models
const o1FieldMap = {
  max_tokens: 'max_completion_tokens'
};
```
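Applying the mapping above to a request body: `max_tokens` is renamed to `max_completion_tokens` before the request is sent. The body values are placeholders.

```typescript
const rawBody = {
  model: 'o1-mini',
  messages: [{ role: 'user', content: 'Hello' }],
  max_tokens: 1024
};

const mappedBody = applyFieldMapping(rawBody, o1FieldMap);
// mappedBody => { model: 'o1-mini', messages: [...], max_completion_tokens: 1024 }
```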
7. Error Handling and Monitoring
Unified Error Handling
```typescript
const handleModelError = (error: any, modelConfig: any) => {
  addLog.error(`Model ${modelConfig.model} error`, {
    error: error.message,
    provider: modelConfig.provider,
    requestUrl: modelConfig.requestUrl
  });

  // Return a user-friendly message based on the error code
  if (error.code === 'insufficient_quota') {
    return '模型配额不足,请检查账户余额';
  }
  if (error.code === 'model_not_found') {
    return '模型不存在,请检查模型配置';
  }
  return `模型调用失败: ${error.message}`;
};
```
Performance Monitoring
```typescript
const monitorModelPerformance = async (modelCall: () => Promise<any>) => {
  const startTime = Date.now();
  try {
    const result = await modelCall();
    const duration = Date.now() - startTime;
    addLog.info('Model call success', {
      duration,
      tokens: result.tokens,
      cost: result.cost
    });
    return result;
  } catch (error) {
    const duration = Date.now() - startTime;
    addLog.error('Model call failed', {
      duration,
      error: error.message
    });
    throw error;
  }
};
```
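A small usage sketch, assuming the functions above are in scope: wrap an embedding call so its duration and token usage are logged uniformly. The input string is a placeholder; fields the wrapped result does not carry (such as `cost`) are simply logged as undefined.

```typescript
const { vectors } = await monitorModelPerformance(() =>
  getVectorsByText({
    model: getEmbeddingModel(),
    input: 'performance probe',
    type: EmbeddingTypeEnm.query
  })
);
console.log(`got ${vectors.length} vector(s)`);
```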
Summary
FastGPT's LLM integration layer implements a unified, flexible, and extensible model-management architecture:
- Unified abstraction: standardized interfaces hide the differences between models
- Multi-model support: full coverage of LLM, Embedding, ReRank, TTS, and STT
- Flexible configuration: custom request paths, authentication, and field mapping
- Dynamic management: runtime model configuration updates and testing
- Error handling: thorough error handling and performance monitoring
- Provider adaptation: support for the mainstream AI service providers
This architecture gives FastGPT a solid foundation for integrating AI capabilities and makes it straightforward to onboard new models and providers.