Adding a SiliconFlow component in Langflow
Component code:
import requests
from pydantic.v1 import SecretStr
from typing_extensions import override
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
from langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput
# List of models available from SiliconFlow
SILICONFLOW_MODELS = [
    "Pro/deepseek-ai/DeepSeek-R1",
    "Pro/deepseek-ai/DeepSeek-V3",
    "deepseek-ai/DeepSeek-R1",
    "deepseek-ai/DeepSeek-V3",
    "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
    "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    "SeedLLM/Seed-Rice-7B",
    "Qwen/QVQ-72B-Preview",
    "deepseek-ai/DeepSeek-V2.5",
    "meta-llama/Llama-3.3-70B-Instruct",
    "Qwen/QwQ-32B-Preview",
    "Qwen/Qwen2.5-Coder-32B-Instruct",
    "Qwen/Qwen2-VL-72B-Instruct",
    "OpenGVLab/InternVL2-26B",
    "Qwen/Qwen2.5-72B-Instruct-128K",
    "deepseek-ai/deepseek-vl2",
    "Qwen/Qwen2.5-72B-Instruct",
    "Qwen/Qwen2.5-32B-Instruct",
    "Qwen/Qwen2.5-14B-Instruct",
    "Qwen/Qwen2.5-7B-Instruct",
    "Qwen/Qwen2.5-Coder-7B-Instruct",
    "Qwen/Qwen2-VL-7B-Instruct",
    "OpenGVLab/InternVL2-8B",
    "Qwen/Qwen2-7B-Instruct",
    "Qwen/Qwen2-1.5B-Instruct",
    "THUDM/glm-4-9b-chat",
    "THUDM/chatglm3-6b",
    "01-ai/Yi-1.5-9B-Chat-16K",
    "01-ai/Yi-1.5-6B-Chat",
    "01-ai/Yi-1.5-34B-Chat-16K",
    "google/gemma-2-27b-it",
    "google/gemma-2-9b-it",
    "AIDC-AI/Marco-o1",
    "LoRA/meta-llama/Meta-Llama-3.1-8B-Instruct",
    "LoRA/Qwen/Qwen2.5-32B-Instruct",
    "LoRA/Qwen/Qwen2.5-14B-Instruct",
    "Vendor-A/Qwen/Qwen2.5-72B-Instruct",
    "Pro/deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
    "Pro/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
    "Pro/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    "Pro/Qwen/Qwen2.5-Coder-7B-Instruct",
    "Pro/Qwen/Qwen2-VL-7B-Instruct",
    "Pro/OpenGVLab/InternVL2-8B",
    "Pro/Qwen/Qwen2.5-7B-Instruct",
    "Pro/meta-llama/Meta-Llama-3.1-8B-Instruct",
    "LoRA/Qwen/Qwen2.5-72B-Instruct",
    "Pro/Qwen/Qwen2-7B-Instruct",
    "Pro/Qwen/Qwen2-1.5B-Instruct",
    "LoRA/Qwen/Qwen2.5-7B-Instruct",
    "Pro/THUDM/glm-4-9b-chat",
    "Pro/google/gemma-2-9b-it",
]
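# Note: the hardcoded list above is only a fallback. When an API key is
# provided, get_models() below refreshes the dropdown from the /models endpoint.
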
class SiliconFlowModelComponent(LCModelComponent):
    display_name = "SiliconFlow"
    description = "Generate text using SiliconFlow LLMs."
    # Langflow ships no dedicated SiliconFlow icon, so an existing one is reused.
    icon = "google"

    inputs = [
        *LCModelComponent._base_inputs,
        IntInput(
            name="max_tokens",
            display_name="Max Tokens",
            advanced=True,
            info="Maximum number of tokens to generate. Set to 0 for unlimited.",
            range_spec=RangeSpec(min=0, max=128000),
        ),
        DictInput(
            name="model_kwargs",
            display_name="Model Kwargs",
            advanced=True,
            info="Additional keyword arguments to pass to the model.",
        ),
        BoolInput(
            name="json_mode",
            display_name="JSON Mode",
            advanced=True,
            info="If True, it will output JSON regardless of passing a schema.",
        ),
        DropdownInput(
            name="model_name",
            display_name="Model Name",
            info="SiliconFlow model to use",
            options=SILICONFLOW_MODELS,
            value="deepseek-ai/DeepSeek-R1",
            refresh_button=True,
        ),
        StrInput(
            name="api_base",
            display_name="SiliconFlow API Base",
            advanced=True,
            info="Base URL for API requests. Defaults to https://api.siliconflow.cn/v1",
            value="https://api.siliconflow.cn/v1",
        ),
        SecretStrInput(
            name="api_key",
            display_name="SiliconFlow API Key",
            info="The SiliconFlow API Key",
            advanced=False,
            required=True,
        ),
        SliderInput(
            name="temperature",
            display_name="Temperature",
            info="Controls randomness in responses",
            value=1.0,
            range_spec=RangeSpec(min=0, max=2, step=0.01),
        ),
        IntInput(
            name="seed",
            display_name="Seed",
            info="The seed controls the reproducibility of the job.",
            advanced=True,
            value=1,
        ),
    ]
    def get_models(self) -> list[str]:
        # Without an API key, fall back to the static list above.
        if not self.api_key:
            return SILICONFLOW_MODELS

        url = f"{self.api_base}/models"
        headers = {"Authorization": f"Bearer {self.api_key}", "Accept": "application/json"}
        try:
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            model_list = response.json()
            return [model["id"] for model in model_list.get("data", [])]
        except requests.RequestException as e:
            self.status = f"Error fetching models: {e}"
            return SILICONFLOW_MODELS

    @override
    def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:
        # Refresh the model dropdown whenever the key, base URL, or model field changes.
        if field_name in {"api_key", "api_base", "model_name"}:
            models = self.get_models()
            build_config["model_name"]["options"] = models
        return build_config
    def build_model(self) -> LanguageModel:
        try:
            from langchain_openai import ChatOpenAI
        except ImportError as e:
            msg = "langchain-openai not installed. Please install it with `pip install langchain-openai`."
            raise ImportError(msg) from e

        api_key = SecretStr(self.api_key).get_secret_value() if self.api_key else None
        # SiliconFlow exposes an OpenAI-compatible API, so ChatOpenAI can be
        # pointed at it by overriding base_url.
        output = ChatOpenAI(
            model=self.model_name,
            temperature=self.temperature if self.temperature is not None else 0.1,
            max_tokens=self.max_tokens or None,
            model_kwargs=self.model_kwargs or {},
            base_url=self.api_base,
            api_key=api_key,
            streaming=self.stream if hasattr(self, "stream") else False,
            seed=self.seed,
        )
        if self.json_mode:
            output = output.bind(response_format={"type": "json_object"})
        return output
    def _get_exception_message(self, e: Exception) -> str | None:
        """Extract a human-readable message from a SiliconFlow API exception."""
        try:
            from openai import BadRequestError

            if isinstance(e, BadRequestError):
                message = e.body.get("message")
                if message:
                    return message
        except ImportError:
            pass
        return None
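
Before wiring the component into a Langflow flow, you can sanity-check your key and base URL with a small standalone script. The sketch below is not part of the component; it assumes the key is stored in an environment variable named SILICONFLOW_API_KEY (an assumed name). It first lists the available models and then runs one chat completion through langchain-openai, mirroring what get_models() and build_model() do internally.

import os

import requests
from langchain_openai import ChatOpenAI

API_BASE = "https://api.siliconflow.cn/v1"
API_KEY = os.environ["SILICONFLOW_API_KEY"]  # assumed environment variable name

# 1. List available models, mirroring get_models().
resp = requests.get(
    f"{API_BASE}/models",
    headers={"Authorization": f"Bearer {API_KEY}", "Accept": "application/json"},
    timeout=10,
)
resp.raise_for_status()
print([model["id"] for model in resp.json().get("data", [])])

# 2. Run a single completion, mirroring build_model().
llm = ChatOpenAI(
    model="deepseek-ai/DeepSeek-V3",
    base_url=API_BASE,
    api_key=API_KEY,
    temperature=1.0,
    max_tokens=64,
)
print(llm.invoke("Say hello in one short sentence.").content)

If both steps succeed, pasting the component code into Langflow and filling in the same key and base URL should work the same way.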