diff --git a/apps/llm/function.py b/apps/llm/function.py
index 62b5f93bc0aeb8334ffff3bbbe902e87d16edea4..07497e8aef37b553412a950731146c99462d03bf 100644
--- a/apps/llm/function.py
+++ b/apps/llm/function.py
@@ -32,11 +32,19 @@ _CLASS_DICT: dict[LLMProvider, type[BaseProvider]] = {
class FunctionLLM:
"""用于FunctionCall的模型"""
- timeout: float = 30.0
-
def __init__(self, llm_config: LLMData | None = None) -> None:
- pass
+ """初始化大模型客户端"""
+ if not llm_config:
+ err = "[FunctionLLM] 未设置LLM配置"
+ _logger.error(err)
+ raise RuntimeError(err)
+
+ if llm_config.provider not in _CLASS_DICT:
+            err = f"[FunctionLLM] 未支持的LLM类型: {llm_config.provider}"
+ _logger.error(err)
+ raise RuntimeError(err)
+ self._provider = _CLASS_DICT[llm_config.provider](llm_config)
@staticmethod
async def process_response(response: str) -> str:
@@ -116,11 +124,9 @@ class JsonGenerator:
)
self._err_info = ""
-
- async def _assemble_message(self) -> str:
- """组装消息"""
+ async def _single_trial(self, max_tokens: int | None = None, temperature: float | None = None) -> dict[str, Any]:
+ """单次尝试"""
# 检查类型
- function_call = self._llm.config.functionCallBackend == FunctionCallBackend.FUNCTION_CALL
# 渲染模板
template = self._env.from_string(JSON_GEN_BASIC)
@@ -133,9 +139,6 @@ class JsonGenerator:
err_info=self._err_info,
)
- async def _single_trial(self, max_tokens: int | None = None, temperature: float | None = None) -> dict[str, Any]:
- """单次尝试"""
- prompt = await self._assemble_message()
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
diff --git a/apps/llm/providers/base.py b/apps/llm/providers/base.py
index 02c2b657175c8558d50e4f7277ed2983c3db91e7..7067c02017ed4e52abb7f35be83824004e935222 100644
--- a/apps/llm/providers/base.py
+++ b/apps/llm/providers/base.py
@@ -34,6 +34,8 @@ class BaseProvider:
self._timeout: float = LLM_TIMEOUT
self.full_thinking: str = ""
self.full_answer: str = ""
+ self.input_tokens: int = 0
+ self.output_tokens: int = 0
self._check_type()
self._init_client()
diff --git a/apps/llm/providers/ollama.py b/apps/llm/providers/ollama.py
index a407feacda93d5405751abe2a107b6944d01f9e2..92bcbc6e13283812c694ca488d1bb826304644db 100644
--- a/apps/llm/providers/ollama.py
+++ b/apps/llm/providers/ollama.py
@@ -54,8 +54,6 @@ class OllamaProvider(BaseProvider):
},
timeout=self._timeout,
)
- self.input_tokens = 0
- self.output_tokens = 0
def _process_usage_data(self, last_chunk: ChatResponse | None, messages: list[dict[str, str]]) -> None:
"""处理最后一个chunk的usage数据"""
@@ -80,11 +78,16 @@ class OllamaProvider(BaseProvider):
self, messages: list[dict[str, str]],
*, include_thinking: bool = False,
) -> AsyncGenerator[LLMChunk, None]:
+ # 检查能力
if not self._allow_chat:
err = "[OllamaProvider] 当前模型不支持Chat"
_logger.error(err)
raise RuntimeError(err)
+ # 初始化Token计数
+ self.input_tokens = 0
+ self.output_tokens = 0
+
# 检查消息
messages = self._validate_messages(messages)
diff --git a/apps/llm/providers/openai.py b/apps/llm/providers/openai.py
index bab1ca0aa2144509bd5827e17e9bdd84dbf65ebb..27d0e5a8fee10d24e17b0939e59763bfb5bb28c1 100644
--- a/apps/llm/providers/openai.py
+++ b/apps/llm/providers/openai.py
@@ -60,9 +60,6 @@ class OpenAIProvider(BaseProvider):
api_key=self.config.apiKey,
timeout=self._timeout,
)
- # 初始化Token计数
- self.input_tokens = 0
- self.output_tokens = 0
def _handle_usage_chunk(self, chunk: ChatCompletionChunk | None, messages: list[dict[str, str]]) -> None:
"""处理包含usage信息的chunk"""
@@ -91,11 +88,16 @@ class OpenAIProvider(BaseProvider):
*, include_thinking: bool = False,
) -> AsyncGenerator[LLMChunk, None]:
"""聊天"""
+ # 检查能力
if not self._allow_chat:
err = "[OpenAIProvider] 当前模型不支持Chat"
_logger.error(err)
raise RuntimeError(err)
+ # 初始化Token计数
+ self.input_tokens = 0
+ self.output_tokens = 0
+
# 检查消息
messages = self._validate_messages(messages)
diff --git a/apps/llm/reasoning.py b/apps/llm/reasoning.py
index 56dadfc027112d5b83e3cb935905e33920c7fa9d..fc2d3c54b3b9dba2cd70d05b07b77bded8ccc061 100644
--- a/apps/llm/reasoning.py
+++ b/apps/llm/reasoning.py
@@ -5,14 +5,13 @@ import logging
from collections.abc import AsyncGenerator
from apps.models.llm import LLMData
-from apps.schemas.llm import LLMProvider
+from apps.schemas.llm import LLMChunk, LLMProvider
from .providers import (
BaseProvider,
OllamaProvider,
OpenAIProvider,
)
-from .token import TokenCalculator
_logger = logging.getLogger(__name__)
_CLASS_DICT: dict[LLMProvider, type[BaseProvider]] = {
@@ -24,74 +23,47 @@ _CLASS_DICT: dict[LLMProvider, type[BaseProvider]] = {
class ReasoningLLM:
"""调用用于问答的大模型"""
- input_tokens: int = 0
- output_tokens: int = 0
-
- def __init__(self, llm_config: LLMData | None = None) -> None:
+ def __init__(self, llm_config: LLMData | None) -> None:
"""判断配置文件里用了哪种大模型;初始化大模型客户端"""
+ if not llm_config:
+ err = "[ReasoningLLM] 未设置问答LLM"
+ _logger.error(err)
+ raise RuntimeError(err)
+
+ if llm_config.provider not in _CLASS_DICT:
+            err = f"[ReasoningLLM] 未支持的问答LLM类型: {llm_config.provider}"
+ _logger.error(err)
+ raise RuntimeError(err)
+
+ self._provider = _CLASS_DICT[llm_config.provider](llm_config)
- async def call( # noqa: C901, PLR0912, PLR0913
+ async def call(
self,
messages: list[dict[str, str]],
- max_tokens: int | None = None,
- temperature: float | None = None,
*,
+ include_thinking: bool = True,
streaming: bool = True,
- result_only: bool = True,
- model: str | None = None,
- ) -> AsyncGenerator[str, None]:
+ ) -> AsyncGenerator[LLMChunk, None]:
"""调用大模型,分为流式和非流式两种"""
- # 检查max_tokens和temperature
- if max_tokens is None:
- max_tokens = self.config.maxToken
- if temperature is None:
- temperature = self.config.temperature
- if model is None:
- model = self.config.modelName
- stream = await self._create_stream(msg_list, max_tokens, temperature, model)
- reasoning = ReasoningContent()
- reasoning_content = ""
- result = ""
-
- async for chunk in stream:
- # 如果包含统计信息
- if chunk.usage:
- self.input_tokens = chunk.usage.prompt_tokens
- self.output_tokens = chunk.usage.completion_tokens
- # 如果没有Choices
- if not chunk.choices:
+ if streaming:
+ async for chunk in await self._provider.chat(messages, include_thinking=include_thinking):
+ yield chunk
+ else:
+ # 非流式模式下,需要收集所有内容
+ async for _ in await self._provider.chat(messages, include_thinking=include_thinking):
continue
+ # 直接yield最终结果
+ yield LLMChunk(
+ content=self._provider.full_answer,
+ reasoning_content=self._provider.full_thinking,
+ )
- # 处理chunk
- if reasoning.is_first_chunk:
- reason, text = reasoning.process_first_chunk(chunk)
- else:
- reason, text = reasoning.process_chunk(chunk)
-
- # 推送消息
- if streaming:
- if reason and not result_only:
- yield reason
- if text:
- yield text
-
- # 整理结果
- reasoning_content += reason
- result += text
-
- if not streaming:
- if not result_only:
- yield reasoning_content
- yield result
-
- _logger.info("[Reasoning] 推理内容: %s\n\n%s", reasoning_content, result)
+ @property
+ def input_tokens(self) -> int:
+ """获取输入token数"""
+ return self._provider.input_tokens
- # 更新token统计
- if self.input_tokens == 0 or self.output_tokens == 0:
- self.input_tokens = TokenCalculator().calculate_token_length(
- messages,
- )
- self.output_tokens = TokenCalculator().calculate_token_length(
- [{"role": "assistant", "content": result}],
- pure_text=True,
- )
+ @property
+ def output_tokens(self) -> int:
+ """获取输出token数"""
+ return self._provider.output_tokens
diff --git a/apps/models/llm.py b/apps/models/llm.py
index d8199977e1de99eff6977cbcea467a6cd1680bd9..e8fbb5f94a27ba4b9105be08db686bfcc9da1904 100644
--- a/apps/models/llm.py
+++ b/apps/models/llm.py
@@ -24,7 +24,9 @@ class LLMData(Base):
"""LLM API Key"""
modelName: Mapped[str] = mapped_column(String(300), nullable=False) # noqa: N815
"""LLM模型名"""
- maxToken: Mapped[int] = mapped_column(Integer, default=8192, nullable=False) # noqa: N815
+ ctxLength: Mapped[int] = mapped_column(Integer, default=8192, nullable=False) # noqa: N815
+ """LLM总上下文长度"""
+ maxToken: Mapped[int] = mapped_column(Integer, default=2048, nullable=False) # noqa: N815
"""LLM最大Token数量"""
temperature: Mapped[float] = mapped_column(Float, default=0.7, nullable=False)
"""LLM温度"""
diff --git a/apps/routers/conversation.py b/apps/routers/conversation.py
index 269f4ff0137be7da64a15df0ebeff11386db9119..823a322ae3a00787686e778e738017c620d9fb41 100644
--- a/apps/routers/conversation.py
+++ b/apps/routers/conversation.py
@@ -180,7 +180,7 @@ async def update_conversation(
conversationId=conv.id,
title=conv.title,
docCount=await DocumentManager.get_doc_count(user_sub, conv.id),
- createdTime=datetime.fromtimestamp(conv.createdAt, tz=pytz.timezone("Asia/Shanghai")).strftime(
+ createdTime=datetime.fromtimestamp(conv.createdAt, tz=UTC).strftime(
"%Y-%m-%d %H:%M:%S",
),
appId=conv.appId if conv.appId else "",
diff --git a/apps/routers/document.py b/apps/routers/document.py
index bf69b4c6ad5a11d32aa790401ff052da248634d4..8c33d5738b152bef36ffd627e1cfa9a9b4a00ba8 100644
--- a/apps/routers/document.py
+++ b/apps/routers/document.py
@@ -11,16 +11,16 @@ from fastapi import APIRouter, Depends, Path, Request, UploadFile, status
from fastapi.responses import JSONResponse
from apps.dependency import verify_personal_token, verify_session
-from apps.schemas.enum_var import DocumentStatus
-from apps.schemas.response_data import (
+from apps.schemas.document import (
ConversationDocumentItem,
ConversationDocumentMsg,
ConversationDocumentRsp,
- ResponseData,
UploadDocumentMsg,
UploadDocumentMsgItem,
UploadDocumentRsp,
)
+from apps.schemas.enum_var import DocumentStatus
+from apps.schemas.response_data import ResponseData
from apps.services.conversation import ConversationManager
from apps.services.document import DocumentManager
from apps.services.knowledge_base import KnowledgeBaseService
diff --git a/apps/routers/flow.py b/apps/routers/flow.py
index d4389b9b38bd2bd3cb1bc2629462c8eb98dc24b9..251d08dcb0074c2039a625368a03e571f2662507 100644
--- a/apps/routers/flow.py
+++ b/apps/routers/flow.py
@@ -16,10 +16,9 @@ from apps.schemas.response_data import (
FlowStructureGetRsp,
FlowStructurePutMsg,
FlowStructurePutRsp,
- NodeServiceListMsg,
- NodeServiceListRsp,
ResponseData,
)
+from apps.schemas.service import NodeServiceListMsg, NodeServiceListRsp
from apps.services.appcenter import AppCenterManager
from apps.services.flow import FlowManager
from apps.services.flow_validate import FlowService
@@ -61,7 +60,7 @@ async def get_services(request: Request) -> NodeServiceListRsp:
status.HTTP_404_NOT_FOUND: {"model": ResponseData},
},
)
-async def get_flow(request: Request, appId: uuid.UUID, flowId: uuid.UUID) -> JSONResponse: # noqa: N803
+async def get_flow(request: Request, appId: uuid.UUID, flowId: str) -> JSONResponse: # noqa: N803
"""获取流拓扑结构"""
if not await AppCenterManager.validate_user_app_access(request.state.user_sub, appId):
return JSONResponse(
@@ -102,7 +101,7 @@ async def get_flow(request: Request, appId: uuid.UUID, flowId: uuid.UUID) -> JSO
async def put_flow(
request: Request,
appId: Annotated[uuid.UUID, Query()], # noqa: N803
- flowId: Annotated[uuid.UUID, Query()], # noqa: N803
+ flowId: Annotated[str, Query()], # noqa: N803
put_body: Annotated[PutFlowReq, Body()],
) -> JSONResponse:
"""修改流拓扑结构"""
@@ -117,7 +116,7 @@ async def put_flow(
)
put_body.flow = await FlowService.remove_excess_structure_from_flow(put_body.flow)
await FlowService.validate_flow_illegal(put_body.flow)
- put_body.flow.connectivity = await FlowService.validate_flow_connectivity(put_body.flow)
+ put_body.flow.check_status.connectivity = await FlowService.validate_flow_connectivity(put_body.flow)
try:
await FlowManager.put_flow_by_app_and_flow_id(appId, flowId, put_body.flow)
@@ -156,7 +155,7 @@ async def put_flow(
status.HTTP_404_NOT_FOUND: {"model": ResponseData},
},
)
-async def delete_flow(request: Request, appId: uuid.UUID, flowId: uuid.UUID) -> JSONResponse: # noqa: N803
+async def delete_flow(request: Request, appId: uuid.UUID, flowId: str) -> JSONResponse: # noqa: N803
"""删除流拓扑结构"""
if not await AppCenterManager.validate_app_belong_to_user(request.state.user_sub, appId):
return JSONResponse(
diff --git a/apps/routers/mcp_service.py b/apps/routers/mcp_service.py
index b6bc17e045c9e04859c22672e87ede361f21bcbb..16e8c8c418954841aad366b617eb528bbff38ffe 100644
--- a/apps/routers/mcp_service.py
+++ b/apps/routers/mcp_service.py
@@ -10,7 +10,7 @@ from fastapi.responses import JSONResponse
from apps.dependency.user import verify_admin, verify_personal_token, verify_session
from apps.schemas.enum_var import SearchType
from apps.schemas.mcp import ActiveMCPServiceRequest, UpdateMCPServiceRequest
-from apps.schemas.response_data import (
+from apps.schemas.mcp_service import (
ActiveMCPServiceRsp,
BaseMCPServiceOperationMsg,
DeleteMCPServiceRsp,
@@ -19,12 +19,12 @@ from apps.schemas.response_data import (
GetMCPServiceDetailRsp,
GetMCPServiceListMsg,
GetMCPServiceListRsp,
- ResponseData,
UpdateMCPServiceMsg,
UpdateMCPServiceRsp,
UploadMCPServiceIconMsg,
UploadMCPServiceIconRsp,
)
+from apps.schemas.response_data import ResponseData
from apps.services.mcp_service import MCPServiceManager
logger = logging.getLogger(__name__)
@@ -207,11 +207,13 @@ async def get_service_detail(
name=data.name,
description=data.description,
overview=config.overview,
- data=config.config.model_dump(by_alias=True, exclude_none=True),
+ data=config.model_dump(by_alias=True, exclude_none=True),
mcpType=config.mcpType,
)
else:
# 组装详情所需信息
+ # 从数据库获取工具列表替代data.tools
+ tools = await MCPServiceManager.get_mcp_tools(service_id)
detail = GetMCPServiceDetailMsg(
serviceId=service_id,
icon=icon,
@@ -219,7 +221,7 @@ async def get_service_detail(
description=data.description,
overview=config.overview,
status=data.status,
- tools=data.tools,
+ tools=tools,
)
return JSONResponse(
diff --git a/apps/routers/service.py b/apps/routers/service.py
index 66f929e3104cd5b8ac3452aaadc59b5deee028d4..ae5afdcd7e1c3a3033a31ae7682e4f4bef2492b7 100644
--- a/apps/routers/service.py
+++ b/apps/routers/service.py
@@ -11,20 +11,21 @@ from fastapi.responses import JSONResponse
from apps.dependency.user import verify_personal_token, verify_session
from apps.exceptions import InstancePermissionError
from apps.schemas.enum_var import SearchType
-from apps.schemas.response_data import (
+from apps.schemas.response_data import ResponseData
+from apps.schemas.service import (
BaseServiceOperationMsg,
ChangeFavouriteServiceMsg,
+ ChangeFavouriteServiceRequest,
ChangeFavouriteServiceRsp,
DeleteServiceRsp,
GetServiceDetailMsg,
GetServiceDetailRsp,
GetServiceListMsg,
GetServiceListRsp,
- ResponseData,
UpdateServiceMsg,
+ UpdateServiceRequest,
UpdateServiceRsp,
)
-from apps.schemas.service import ChangeFavouriteServiceRequest, UpdateServiceRequest
from apps.services.service import ServiceCenterManager
logger = logging.getLogger(__name__)
diff --git a/apps/scheduler/call/core.py b/apps/scheduler/call/core.py
index 34997a9d4c379196555ac8605bf2b2189b023f21..4f493ce2a106b854d4f6a0480e147310412486b8 100644
--- a/apps/scheduler/call/core.py
+++ b/apps/scheduler/call/core.py
@@ -188,14 +188,8 @@ class CoreCall(BaseModel):
async def _llm(self, messages: list[dict[str, Any]], *, streaming: bool = False) -> AsyncGenerator[str, None]:
"""Call可直接使用的LLM非流式调用"""
- if streaming:
- async for chunk in self._llm_obj.reasoning.call(messages, streaming=streaming):
- yield chunk
- else:
- result = ""
- async for chunk in self._llm_obj.reasoning.call(messages, streaming=streaming):
- result += chunk
- yield result
+ async for chunk in self._llm_obj.reasoning.call(messages, streaming=streaming):
+ yield chunk.content or ""
async def _json(self, messages: list[dict[str, Any]], schema: dict[str, Any]) -> dict[str, Any]:
diff --git a/apps/scheduler/mcp/plan.py b/apps/scheduler/mcp/plan.py
index 448a4081e3c9171fc1fddac3fba86b5a076ef397..f73ad3972bbac2ac10b7d614942739aea89b4956 100644
--- a/apps/scheduler/mcp/plan.py
+++ b/apps/scheduler/mcp/plan.py
@@ -60,10 +60,8 @@ class MCPPlanner:
async for chunk in self._llm.reasoning.call(
message,
streaming=False,
- temperature=0.07,
- result_only=True,
):
- result += chunk
+ result += chunk.content or ""
return result
@@ -105,8 +103,7 @@ class MCPPlanner:
async for chunk in self._llm.reasoning.call(
[{"role": "user", "content": prompt}],
streaming=False,
- temperature=0.07,
):
- result += chunk
+ result += chunk.content or ""
return result
diff --git a/apps/scheduler/mcp/select.py b/apps/scheduler/mcp/select.py
index 84d7f4d87bb51813c878df5d07dd5d669da25ecc..cb6737561177ec8d6141cf2e68a32738cf45a232 100644
--- a/apps/scheduler/mcp/select.py
+++ b/apps/scheduler/mcp/select.py
@@ -31,7 +31,7 @@ class MCPSelector:
]
result = ""
async for chunk in self._llm.reasoning.call(message):
- result += chunk
+ result += chunk.content or ""
return result
diff --git a/apps/scheduler/slot/parser/__init__.py b/apps/scheduler/slot/parser/__init__.py
index 7d4a577cf41fb782ab4fb779ccbaf0469f2ca9dd..35037b80acf8a824ad953068ff03c1eafbc70646 100644
--- a/apps/scheduler/slot/parser/__init__.py
+++ b/apps/scheduler/slot/parser/__init__.py
@@ -3,12 +3,10 @@
from .const import SlotConstParser
from .date import SlotDateParser
-from .default import SlotDefaultParser
from .timestamp import SlotTimestampParser
__all__ = [
"SlotConstParser",
"SlotDateParser",
- "SlotDefaultParser",
"SlotTimestampParser",
]
diff --git a/apps/scheduler/slot/parser/const.py b/apps/scheduler/slot/parser/const.py
index 3d9fadc57914ddd16f2a08a47ba5bb943426e05a..d722c1c609b0c1d94486e2531d6f7b4043281838 100644
--- a/apps/scheduler/slot/parser/const.py
+++ b/apps/scheduler/slot/parser/const.py
@@ -1,9 +1,12 @@
# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
"""固定值设置器"""
+from collections.abc import Generator
from typing import Any
from jsonschema import Validator
+from jsonschema._utils import equal
+from jsonschema.exceptions import ValidationError
from apps.schemas.enum_var import SlotType
@@ -15,14 +18,23 @@ class SlotConstParser:
name: str = "const"
@classmethod
- def convert(cls, data: Any, **kwargs) -> Any: # noqa: ANN003
+ def convert(cls, _data: Any, **kwargs) -> Any: # noqa: ANN003
"""
生成keyword的验证器
如果没有对应逻辑则不实现
"""
- raise NotImplementedError
+ return kwargs.get("const")
@classmethod
- def keyword_validate(cls, validator: Validator, keyword: str, instance: Any, schema: dict[str, Any]) -> bool:
+ def keyword_validate(
+ cls,
+ _validator: Validator,
+ keyword: str,
+ instance: Any,
+ _schema: dict[str, Any],
+ ) -> Generator[ValidationError, None, None]:
"""生成对应类型的验证器"""
+ if not equal(keyword, instance):
+            err = f"{keyword!r} was expected"
+ yield ValidationError(err)
diff --git a/apps/scheduler/slot/parser/default.py b/apps/scheduler/slot/parser/default.py
deleted file mode 100644
index 2405bf87930a37742bd71c4f4f1ee68911b72311..0000000000000000000000000000000000000000
--- a/apps/scheduler/slot/parser/default.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
-"""默认值设置器"""
-
-from typing import Any
-
-from jsonschema import Validator
-
-from apps.schemas.enum_var import SlotType
-
-
-class SlotDefaultParser:
- """给字段设置默认值"""
-
- type: SlotType = SlotType.KEYWORD
- name: str = "default"
-
- @classmethod
- def convert(cls, data: Any, **kwargs) -> Any: # noqa: ANN003
- """
- 给字段设置默认值
-
- 如果没有对应逻辑则不实现
- """
- raise NotImplementedError
-
- @classmethod
- def keyword_validate(cls, validator: Validator, keyword: str, instance: Any, schema: dict[str, Any]) -> bool:
- """给字段设置默认值"""
diff --git a/apps/scheduler/slot/slot.py b/apps/scheduler/slot/slot.py
index 565261dce7744bb362fad06112d201c3e264e9f4..8c3ba78306955a2846f51d1f2eabacdc56920231 100644
--- a/apps/scheduler/slot/slot.py
+++ b/apps/scheduler/slot/slot.py
@@ -17,7 +17,6 @@ from apps.scheduler.call.choice.schema import Type
from apps.scheduler.slot.parser import (
SlotConstParser,
SlotDateParser,
- SlotDefaultParser,
SlotTimestampParser,
)
from apps.scheduler.slot.util import escape_path, patch_json
@@ -33,7 +32,6 @@ _TYPE_CHECKER = [
_FORMAT_CHECKER = []
_KEYWORD_CHECKER = {
"const": SlotConstParser.keyword_validate,
- "default": SlotDefaultParser.keyword_validate,
}
# 各类转换器
@@ -43,7 +41,6 @@ _TYPE_CONVERTER = [
]
_KEYWORD_CONVERTER = {
"const": SlotConstParser,
- "default": SlotDefaultParser,
}
diff --git a/apps/schemas/appcenter.py b/apps/schemas/appcenter.py
index deb8c20fa14c557455d147a3bd8df9baf2cc96fc..63f1251dba62a761fa2d19d7c590bbad2562f146 100644
--- a/apps/schemas/appcenter.py
+++ b/apps/schemas/appcenter.py
@@ -72,7 +72,7 @@ class AppData(BaseModel):
class CreateAppRequest(AppData):
"""POST /api/app 请求数据结构"""
- app_id: str | None = Field(None, alias="appId", description="应用ID")
+ app_id: uuid.UUID | None = Field(None, alias="appId", description="应用ID")
class ChangeFavouriteAppRequest(BaseModel):
diff --git a/apps/schemas/comment.py b/apps/schemas/comment.py
index f3668a87af013efb4e8991780640f256acb7fadf..db5baaaa00c7456a21be0284fc33567701df4d40 100644
--- a/apps/schemas/comment.py
+++ b/apps/schemas/comment.py
@@ -1,8 +1,9 @@
-from apps.schemas.enum_var import CommentType
-
+"""评论相关的数据结构"""
from pydantic import BaseModel, Field
+from apps.schemas.enum_var import CommentType
+
class AddCommentData(BaseModel):
"""添加评论"""
@@ -11,4 +12,4 @@ class AddCommentData(BaseModel):
comment: CommentType
dislike_reason: str = Field(default="", max_length=200)
reason_link: str = Field(default="", max_length=200)
- reason_description: str = Field(default="", max_length=500)
\ No newline at end of file
+ reason_description: str = Field(default="", max_length=500)
diff --git a/apps/schemas/conversation.py b/apps/schemas/conversation.py
index ec9db6e69a37b00133d2b0df63c6812cdb7041cf..8a5507b9349fd5a5178633c76d4071591ad4b473 100644
--- a/apps/schemas/conversation.py
+++ b/apps/schemas/conversation.py
@@ -1,3 +1,5 @@
+import uuid
+
from pydantic import BaseModel, Field
@@ -10,4 +12,4 @@ class ChangeConversationData(BaseModel):
class DeleteConversationData(BaseModel):
"""删除会话"""
- conversation_list: list[uuid.UUID] = Field(alias="conversationList")
\ No newline at end of file
+ conversation_list: list[uuid.UUID] = Field(alias="conversationList")
diff --git a/apps/schemas/document.py b/apps/schemas/document.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e4d9901d66a104be8e178bc127553e28e6a751e
--- /dev/null
+++ b/apps/schemas/document.py
@@ -0,0 +1,65 @@
+# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
+"""FastAPI 返回数据结构 - 文档相关"""
+
+from pydantic import BaseModel, Field
+
+from .enum_var import DocumentStatus
+
+
+class ConversationDocumentItem(BaseModel):
+ """GET /api/document/{conversation_id} Result内元素数据结构"""
+
+ id: str = Field(alias="_id", default="")
+ user_sub: None = None
+ status: DocumentStatus
+ conversation_id: None = None
+
+ class Config:
+ """配置"""
+
+ populate_by_name = True
+
+
+class ConversationDocumentMsg(BaseModel):
+ """GET /api/document/{conversation_id} Result数据结构"""
+
+ documents: list[ConversationDocumentItem] = []
+
+
+class ConversationDocumentRsp(BaseModel):
+ """GET /api/document/{conversation_id} 返回数据结构"""
+
+ code: int
+ message: str
+ result: ConversationDocumentMsg
+
+
+class UploadDocumentMsgItem(BaseModel):
+ """POST /api/document/{conversation_id} 返回数据结构"""
+
+ id: str = Field(alias="_id", default="")
+ user_sub: None = None
+ name: str = Field(default="", description="文档名称")
+ type: str = Field(default="", description="文档类型")
+ size: float = Field(default=0.0, description="文档大小")
+ created_at: None = None
+ conversation_id: None = None
+
+ class Config:
+ """配置"""
+
+ populate_by_name = True
+
+
+class UploadDocumentMsg(BaseModel):
+ """POST /api/document/{conversation_id} 返回数据结构"""
+
+ documents: list[UploadDocumentMsgItem]
+
+
+class UploadDocumentRsp(BaseModel):
+ """POST /api/document/{conversation_id} 返回数据结构"""
+
+ code: int
+ message: str
+ result: UploadDocumentMsg
diff --git a/apps/schemas/mcp_service.py b/apps/schemas/mcp_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2af0824ef64f9af162e6f192cd87bc037a8a412
--- /dev/null
+++ b/apps/schemas/mcp_service.py
@@ -0,0 +1,112 @@
+# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
+"""MCP 服务相关数据结构"""
+
+import uuid
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+from apps.models.mcp import MCPInstallStatus, MCPTools, MCPType
+from apps.schemas.response_data import ResponseData
+
+
+class MCPServiceCardItem(BaseModel):
+ """插件中心:MCP服务卡片数据结构"""
+
+ mcpservice_id: str = Field(..., alias="mcpserviceId", description="mcp服务ID")
+ name: str = Field(..., description="mcp服务名称")
+ description: str = Field(..., description="mcp服务简介")
+ icon: str = Field(..., description="mcp服务图标")
+ author: str = Field(..., description="mcp服务作者")
+ is_active: bool = Field(default=False, alias="isActive", description="mcp服务是否激活")
+ status: MCPInstallStatus = Field(default=MCPInstallStatus.INSTALLING, description="mcp服务状态")
+
+
+class BaseMCPServiceOperationMsg(BaseModel):
+ """插件中心:MCP服务操作Result数据结构"""
+
+ service_id: str = Field(..., alias="serviceId", description="服务ID")
+
+
+class GetMCPServiceListMsg(BaseModel):
+ """GET /api/service Result数据结构"""
+
+ current_page: int = Field(..., alias="currentPage", description="当前页码")
+ services: list[MCPServiceCardItem] = Field(..., description="解析后的服务列表")
+
+
+class GetMCPServiceListRsp(ResponseData):
+ """GET /api/service 返回数据结构"""
+
+ result: GetMCPServiceListMsg = Field(..., title="Result")
+
+
+class UpdateMCPServiceMsg(BaseModel):
+ """插件中心:MCP服务属性数据结构"""
+
+ service_id: uuid.UUID = Field(..., alias="serviceId", description="MCP服务ID")
+ name: str = Field(..., description="MCP服务名称")
+
+
+class UpdateMCPServiceRsp(ResponseData):
+ """POST /api/mcp_service 返回数据结构"""
+
+ result: UpdateMCPServiceMsg = Field(..., title="Result")
+
+
+class UploadMCPServiceIconMsg(BaseModel):
+ """POST /api/mcp_service/icon Result数据结构"""
+
+ service_id: str = Field(..., alias="serviceId", description="MCP服务ID")
+ url: str = Field(..., description="图标URL")
+
+
+class UploadMCPServiceIconRsp(ResponseData):
+ """POST /api/mcp_service/icon 返回数据结构"""
+
+ result: UploadMCPServiceIconMsg = Field(..., title="Result")
+
+
+class GetMCPServiceDetailMsg(BaseModel):
+ """GET /api/mcp_service/{serviceId} Result数据结构"""
+
+ service_id: str = Field(..., alias="serviceId", description="MCP服务ID")
+ icon: str = Field(description="图标", default="")
+ name: str = Field(..., description="MCP服务名称")
+ description: str = Field(description="MCP服务描述")
+ overview: str = Field(description="MCP服务概述")
+    tools: list[MCPTools] = Field(description="MCP服务Tools列表", default_factory=list)
+ status: MCPInstallStatus = Field(
+ description="MCP服务状态",
+ default=MCPInstallStatus.INIT,
+ )
+
+
+class EditMCPServiceMsg(BaseModel):
+ """编辑MCP服务"""
+
+ service_id: str = Field(..., alias="serviceId", description="MCP服务ID")
+ icon: str = Field(description="图标", default="")
+ name: str = Field(..., description="MCP服务名称")
+ description: str = Field(description="MCP服务描述")
+ overview: str = Field(description="MCP服务概述")
+ data: dict[str, Any] = Field(description="MCP服务配置")
+ mcp_type: MCPType = Field(alias="mcpType", description="MCP 类型")
+
+
+class GetMCPServiceDetailRsp(ResponseData):
+ """GET /api/service/{serviceId} 返回数据结构"""
+
+ result: GetMCPServiceDetailMsg | EditMCPServiceMsg = Field(..., title="Result")
+
+
+class DeleteMCPServiceRsp(ResponseData):
+ """DELETE /api/service/{serviceId} 返回数据结构"""
+
+ result: BaseMCPServiceOperationMsg = Field(..., title="Result")
+
+
+class ActiveMCPServiceRsp(ResponseData):
+ """POST /api/mcp/active/{serviceId} 返回数据结构"""
+
+ result: BaseMCPServiceOperationMsg = Field(..., title="Result")
diff --git a/apps/schemas/request_data.py b/apps/schemas/request_data.py
index 6b08614f8822ee583d414c9171009490c3d74ed5..aa588728b21bb5c4fd6a844e3a654a78ab6561b3 100644
--- a/apps/schemas/request_data.py
+++ b/apps/schemas/request_data.py
@@ -2,11 +2,13 @@
"""FastAPI 请求体"""
import uuid
+from typing import Any
from pydantic import BaseModel, Field
from .enum_var import LanguageType
from .flow_topology import FlowItem
+from .llm import LLMProvider
from .message import FlowParams
@@ -50,11 +52,13 @@ class UpdateLLMReq(BaseModel):
"""更新大模型请求体"""
llm_id: str | None = Field(default=None, description="大模型ID", alias="id")
- icon: str = Field(description="图标", default="")
openai_base_url: str = Field(default="", description="OpenAI API Base URL", alias="openaiBaseUrl")
openai_api_key: str = Field(default="", description="OpenAI API Key", alias="openaiApiKey")
model_name: str = Field(default="", description="模型名称", alias="modelName")
max_tokens: int = Field(default=8192, description="最大token数", alias="maxTokens")
+ provider: LLMProvider = Field(description="大模型提供商", alias="provider")
+ ctx_length: int = Field(description="上下文长度", alias="ctxLength")
+ extra_data: dict[str, Any] | None = Field(default=None, description="额外数据", alias="extraData")
class UpdateUserSelectedLLMReq(BaseModel):
diff --git a/apps/schemas/response_data.py b/apps/schemas/response_data.py
index ce1c003cdb15a6960cff86cb5c334e49be0af7c3..db00fe71adf6313623902eed43b3970d58fd7831 100644
--- a/apps/schemas/response_data.py
+++ b/apps/schemas/response_data.py
@@ -6,15 +6,10 @@ from typing import Any
from pydantic import BaseModel, Field
-from apps.models.mcp import MCPInstallStatus, MCPTools
-
from .appcenter import AppCenterCardItem, AppData
-from .enum_var import DocumentStatus
from .flow_topology import (
FlowItem,
- NodeServiceItem,
)
-from .mcp import MCPType
from .parameters import (
BoolOperate,
DictOperate,
@@ -121,57 +116,6 @@ class RecordListRsp(ResponseData):
result: RecordListMsg
-class ConversationDocumentItem(Document):
- """GET /api/document/{conversation_id} Result内元素数据结构"""
-
- id: str = Field(alias="_id", default="")
- user_sub: None = None
- status: DocumentStatus
- conversation_id: None = None
-
- class Config:
- """配置"""
-
- populate_by_name = True
-
-
-class ConversationDocumentMsg(BaseModel):
- """GET /api/document/{conversation_id} Result数据结构"""
-
- documents: list[ConversationDocumentItem] = []
-
-
-class ConversationDocumentRsp(ResponseData):
- """GET /api/document/{conversation_id} 返回数据结构"""
-
- result: ConversationDocumentMsg
-
-
-class UploadDocumentMsgItem(Document):
- """POST /api/document/{conversation_id} 返回数据结构"""
-
- id: str = Field(alias="_id", default="")
- user_sub: None = None
- created_at: None = None
- conversation_id: None = None
-
- class Config:
- """配置"""
-
- populate_by_name = True
-
-
-class UploadDocumentMsg(BaseModel):
- """POST /api/document/{conversation_id} 返回数据结构"""
-
- documents: list[UploadDocumentMsgItem]
-
-
-class UploadDocumentRsp(ResponseData):
- """POST /api/document/{conversation_id} 返回数据结构"""
-
- result: UploadDocumentMsg
-
class OidcRedirectMsg(BaseModel):
"""GET /api/auth/redirect Result数据结构"""
@@ -200,7 +144,7 @@ class ListTeamKnowledgeRsp(ResponseData):
class BaseAppOperationMsg(BaseModel):
"""基础应用操作Result数据结构"""
- app_id: str = Field(..., alias="appId", description="应用ID")
+ app_id: uuid.UUID = Field(..., alias="appId", description="应用ID")
class BaseAppOperationRsp(ResponseData):
@@ -269,201 +213,6 @@ class GetRecentAppListRsp(ResponseData):
result: RecentAppList
-class ServiceCardItem(BaseModel):
- """语义接口中心:服务卡片数据结构"""
-
- service_id: uuid.UUID = Field(..., alias="serviceId", description="服务ID")
- name: str = Field(..., description="服务名称")
- description: str = Field(..., description="服务简介")
- icon: str = Field(..., description="服务图标")
- author: str = Field(..., description="服务作者")
- favorited: bool = Field(..., description="是否已收藏")
-
-
-class ServiceApiData(BaseModel):
- """语义接口中心:服务 API 接口属性数据结构"""
-
- name: str = Field(..., description="接口名称")
- path: str = Field(..., description="接口路径")
- description: str = Field(..., description="接口描述")
-
-
-class BaseServiceOperationMsg(BaseModel):
- """语义接口中心:基础服务操作Result数据结构"""
-
- service_id: uuid.UUID = Field(..., alias="serviceId", description="服务ID")
-
-
-class GetServiceListMsg(BaseModel):
- """GET /api/service Result数据结构"""
-
- current_page: int = Field(..., alias="currentPage", description="当前页码")
- total_count: int = Field(..., alias="totalCount", description="总服务数")
- services: list[ServiceCardItem] = Field(..., description="解析后的服务列表")
-
-
-class GetServiceListRsp(ResponseData):
- """GET /api/service 返回数据结构"""
-
- result: GetServiceListMsg = Field(..., title="Result")
-
-
-class UpdateServiceMsg(BaseModel):
- """语义接口中心:服务属性数据结构"""
-
- service_id: uuid.UUID = Field(..., alias="serviceId", description="服务ID")
- name: str = Field(..., description="服务名称")
- apis: list[ServiceApiData] = Field(..., description="解析后的接口列表")
-
-
-class UpdateServiceRsp(ResponseData):
- """POST /api/service 返回数据结构"""
-
- result: UpdateServiceMsg = Field(..., title="Result")
-
-
-class GetServiceDetailMsg(BaseModel):
- """GET /api/service/{serviceId} Result数据结构"""
-
- service_id: uuid.UUID = Field(..., alias="serviceId", description="服务ID")
- name: str = Field(..., description="服务名称")
- apis: list[ServiceApiData] | None = Field(default=None, description="解析后的接口列表")
- data: dict[str, Any] | None = Field(default=None, description="YAML 内容数据对象")
-
-
-class GetServiceDetailRsp(ResponseData):
- """GET /api/service/{serviceId} 返回数据结构"""
-
- result: GetServiceDetailMsg = Field(..., title="Result")
-
-
-class DeleteServiceRsp(ResponseData):
- """DELETE /api/service/{serviceId} 返回数据结构"""
-
- result: BaseServiceOperationMsg = Field(..., title="Result")
-
-
-class ChangeFavouriteServiceMsg(BaseModel):
- """PUT /api/service/{serviceId} Result数据结构"""
-
- service_id: uuid.UUID = Field(..., alias="serviceId", description="服务ID")
- favorited: bool = Field(..., description="是否已收藏")
-
-
-class ChangeFavouriteServiceRsp(ResponseData):
- """PUT /api/service/{serviceId} 返回数据结构"""
-
- result: ChangeFavouriteServiceMsg = Field(..., title="Result")
-
-
-class NodeServiceListMsg(BaseModel):
- """GET /api/flow/service result"""
-
- services: list[NodeServiceItem] = Field(description="服务列表", default=[])
-
-
-class NodeServiceListRsp(ResponseData):
- """GET /api/flow/service 返回数据结构"""
-
- result: NodeServiceListMsg
-
-
-class MCPServiceCardItem(BaseModel):
- """插件中心:MCP服务卡片数据结构"""
-
- mcpservice_id: str = Field(..., alias="mcpserviceId", description="mcp服务ID")
- name: str = Field(..., description="mcp服务名称")
- description: str = Field(..., description="mcp服务简介")
- icon: str = Field(..., description="mcp服务图标")
- author: str = Field(..., description="mcp服务作者")
- is_active: bool = Field(default=False, alias="isActive", description="mcp服务是否激活")
- status: MCPInstallStatus = Field(default=MCPInstallStatus.INSTALLING, description="mcp服务状态")
-
-
-class BaseMCPServiceOperationMsg(BaseModel):
- """插件中心:MCP服务操作Result数据结构"""
-
- service_id: str = Field(..., alias="serviceId", description="服务ID")
-
-
-class GetMCPServiceListMsg(BaseModel):
- """GET /api/service Result数据结构"""
-
- current_page: int = Field(..., alias="currentPage", description="当前页码")
- services: list[MCPServiceCardItem] = Field(..., description="解析后的服务列表")
-
-
-class GetMCPServiceListRsp(ResponseData):
- """GET /api/service 返回数据结构"""
-
- result: GetMCPServiceListMsg = Field(..., title="Result")
-
-
-class UpdateMCPServiceMsg(BaseModel):
- """插件中心:MCP服务属性数据结构"""
-
- service_id: uuid.UUID = Field(..., alias="serviceId", description="MCP服务ID")
- name: str = Field(..., description="MCP服务名称")
-
-
-class UpdateMCPServiceRsp(ResponseData):
- """POST /api/mcp_service 返回数据结构"""
-
- result: UpdateMCPServiceMsg = Field(..., title="Result")
-
-
-class UploadMCPServiceIconMsg(BaseModel):
- """POST /api/mcp_service/icon Result数据结构"""
-
- service_id: str = Field(..., alias="serviceId", description="MCP服务ID")
- url: str = Field(..., description="图标URL")
-
-
-class UploadMCPServiceIconRsp(ResponseData):
- """POST /api/mcp_service/icon 返回数据结构"""
-
- result: UploadMCPServiceIconMsg = Field(..., title="Result")
-
-
-class GetMCPServiceDetailMsg(BaseModel):
- """GET /api/mcp_service/{serviceId} Result数据结构"""
-
- service_id: str = Field(..., alias="serviceId", description="MCP服务ID")
- icon: str = Field(description="图标", default="")
- name: str = Field(..., description="MCP服务名称")
- description: str = Field(description="MCP服务描述")
- overview: str = Field(description="MCP服务概述")
- tools: list[MCPTools] = Field(description="MCP服务Tools列表", default=[])
- status: MCPInstallStatus = Field(
- description="MCP服务状态",
- default=MCPInstallStatus.INIT,
- )
-
-
-class EditMCPServiceMsg(BaseModel):
- """编辑MCP服务"""
-
- service_id: str = Field(..., alias="serviceId", description="MCP服务ID")
- icon: str = Field(description="图标", default="")
- name: str = Field(..., description="MCP服务名称")
- description: str = Field(description="MCP服务描述")
- overview: str = Field(description="MCP服务概述")
- data: dict[str, Any] = Field(description="MCP服务配置")
- mcp_type: MCPType = Field(alias="mcpType", description="MCP 类型")
-
-
-class GetMCPServiceDetailRsp(ResponseData):
- """GET /api/service/{serviceId} 返回数据结构"""
-
- result: GetMCPServiceDetailMsg | EditMCPServiceMsg = Field(..., title="Result")
-
-
-class DeleteMCPServiceRsp(ResponseData):
- """DELETE /api/service/{serviceId} 返回数据结构"""
-
- result: BaseMCPServiceOperationMsg = Field(..., title="Result")
-
-
class FlowStructureGetMsg(BaseModel):
"""GET /api/flow result"""
@@ -513,12 +262,6 @@ class UserGetRsp(ResponseData):
result: UserGetMsp
-class ActiveMCPServiceRsp(ResponseData):
- """POST /api/mcp/active/{serviceId} 返回数据结构"""
-
- result: BaseMCPServiceOperationMsg = Field(..., title="Result")
-
-
class LLMProvider(BaseModel):
"""LLM提供商数据结构"""
@@ -538,7 +281,6 @@ class LLMProviderInfo(BaseModel):
"""LLM数据结构"""
llm_id: str = Field(alias="llmId", description="LLM ID")
- icon: str = Field(default="", description="LLM图标", max_length=25536)
openai_base_url: str = Field(
default="https://api.openai.com/v1",
description="OpenAI API Base URL",
diff --git a/apps/schemas/service.py b/apps/schemas/service.py
index b7c803ad08c7d5bd70cfdcd16cc0ee7aaba34bfe..7a2c2c78a5aa2dd9d42dff6cb569d150fba2b770 100644
--- a/apps/schemas/service.py
+++ b/apps/schemas/service.py
@@ -1,9 +1,14 @@
-from pydantic import BaseModel, Field
-
+# Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved.
+"""语义接口中心相关数据结构"""
import uuid
from typing import Any
+from pydantic import BaseModel, Field
+
+from .flow_topology import NodeServiceItem
+from .response_data import ResponseData
+
class UpdateServiceRequest(BaseModel):
"""POST /api/service 请求数据结构"""
@@ -15,4 +20,103 @@ class UpdateServiceRequest(BaseModel):
class ChangeFavouriteServiceRequest(BaseModel):
"""PUT /api/service/{serviceId} 请求数据结构"""
- favorited: bool = Field(..., description="是否收藏")
\ No newline at end of file
+ favorited: bool = Field(..., description="是否收藏")
+
+
+class ServiceCardItem(BaseModel):
+ """语义接口中心:服务卡片数据结构"""
+
+ service_id: uuid.UUID = Field(..., alias="serviceId", description="服务ID")
+ name: str = Field(..., description="服务名称")
+ description: str = Field(..., description="服务简介")
+ icon: str = Field(..., description="服务图标")
+ author: str = Field(..., description="服务作者")
+ favorited: bool = Field(..., description="是否已收藏")
+
+
+class ServiceApiData(BaseModel):
+ """语义接口中心:服务 API 接口属性数据结构"""
+
+ name: str = Field(..., description="接口名称")
+ path: str = Field(..., description="接口路径")
+ description: str = Field(..., description="接口描述")
+
+
+class BaseServiceOperationMsg(BaseModel):
+ """语义接口中心:基础服务操作Result数据结构"""
+
+ service_id: uuid.UUID = Field(..., alias="serviceId", description="服务ID")
+
+
+class GetServiceListMsg(BaseModel):
+ """GET /api/service Result数据结构"""
+
+ current_page: int = Field(..., alias="currentPage", description="当前页码")
+ total_count: int = Field(..., alias="totalCount", description="总服务数")
+ services: list[ServiceCardItem] = Field(..., description="解析后的服务列表")
+
+
+class GetServiceListRsp(ResponseData):
+ """GET /api/service 返回数据结构"""
+
+ result: GetServiceListMsg = Field(..., title="Result")
+
+
+class UpdateServiceMsg(BaseModel):
+ """语义接口中心:服务属性数据结构"""
+
+ service_id: uuid.UUID = Field(..., alias="serviceId", description="服务ID")
+ name: str = Field(..., description="服务名称")
+ apis: list[ServiceApiData] = Field(..., description="解析后的接口列表")
+
+
+class UpdateServiceRsp(ResponseData):
+ """POST /api/service 返回数据结构"""
+
+ result: UpdateServiceMsg = Field(..., title="Result")
+
+
+class GetServiceDetailMsg(BaseModel):
+ """GET /api/service/{serviceId} Result数据结构"""
+
+ service_id: uuid.UUID = Field(..., alias="serviceId", description="服务ID")
+ name: str = Field(..., description="服务名称")
+ apis: list[ServiceApiData] | None = Field(default=None, description="解析后的接口列表")
+ data: dict[str, Any] | None = Field(default=None, description="YAML 内容数据对象")
+
+
+class GetServiceDetailRsp(ResponseData):
+ """GET /api/service/{serviceId} 返回数据结构"""
+
+ result: GetServiceDetailMsg = Field(..., title="Result")
+
+
+class DeleteServiceRsp(ResponseData):
+ """DELETE /api/service/{serviceId} 返回数据结构"""
+
+ result: BaseServiceOperationMsg = Field(..., title="Result")
+
+
+class ChangeFavouriteServiceMsg(BaseModel):
+ """PUT /api/service/{serviceId} Result数据结构"""
+
+ service_id: uuid.UUID = Field(..., alias="serviceId", description="服务ID")
+ favorited: bool = Field(..., description="是否已收藏")
+
+
+class ChangeFavouriteServiceRsp(ResponseData):
+ """PUT /api/service/{serviceId} 返回数据结构"""
+
+ result: ChangeFavouriteServiceMsg = Field(..., title="Result")
+
+
+class NodeServiceListMsg(BaseModel):
+ """GET /api/flow/service result"""
+
+ services: list[NodeServiceItem] = Field(description="服务列表", default=[])
+
+
+class NodeServiceListRsp(ResponseData):
+ """GET /api/flow/service 返回数据结构"""
+
+ result: NodeServiceListMsg
diff --git a/apps/scripts/delete_user.py b/apps/scripts/delete_user.py
deleted file mode 100644
index 39e1147b6842b7a0be41321fe6af625637c7f8f7..0000000000000000000000000000000000000000
--- a/apps/scripts/delete_user.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
-"""删除30天未登录用户"""
-
-import logging
-from datetime import UTC, datetime, timedelta
-
-import asyncer
-
-from apps.services.knowledge_base import KnowledgeBaseService
-from apps.services.session import SessionManager
-from apps.services.user import UserManager
-
-logger = logging.getLogger(__name__)
-
-
-async def _delete_user(timestamp: float) -> None:
- """异步删除用户"""
- user_ids = await UserManager.query_userinfo_by_login_time(timestamp)
- for user_id in user_ids:
- await UserManager.delete_user(user_id)
- # 查找用户关联的文件
- doc_collection = MongoDB().get_collection("document")
- docs = [doc["_id"] async for doc in doc_collection.find({"user_sub": user_id})]
- # 删除文件
- try:
- await doc_collection.delete_many({"_id": {"$in": docs}})
- session_id = await SessionManager.get_session_by_user_sub(user_id)
- await KnowledgeBaseService.delete_doc_from_rag(session_id, docs)
- except Exception:
- logger.exception("[DeleteUserCron] 自动删除用户 %s 文档失败", user_id)
-
-
-if __name__ == "__main__":
- """删除用户"""
- try:
- timepoint = datetime.now(UTC) - timedelta(days=30)
- timestamp = timepoint.timestamp()
- asyncer.syncify(_delete_user)(timestamp)
- except Exception:
- logger.exception("[DeleteUserCron] 自动删除用户失败")
diff --git a/apps/services/flow.py b/apps/services/flow.py
index a32779f45bf9cc6f815a43aace2b6d9006d17073..ae07a394bff8311b09a7ae457645d38ff499992b 100644
--- a/apps/services/flow.py
+++ b/apps/services/flow.py
@@ -16,7 +16,7 @@ from apps.scheduler.pool.loader.app import AppLoader
from apps.scheduler.pool.loader.flow import FlowLoader
from apps.scheduler.slot.slot import Slot
from apps.schemas.enum_var import EdgeType
-from apps.schemas.flow import AppMetadata, Edge, Flow, FlowBasicConfig, FlowCheckStatus, Step
+from apps.schemas.flow import AppMetadata, Edge, Flow, Step
from apps.schemas.flow_topology import (
EdgeItem,
FlowItem,
diff --git a/apps/services/llm.py b/apps/services/llm.py
index f7c4294250e411eba0fc5d8d7d78653b1886138c..8933015871150a18fb11709bb3225ea96d3294ff 100644
--- a/apps/services/llm.py
+++ b/apps/services/llm.py
@@ -115,7 +115,6 @@ class LLMManager:
for llm in llm_list:
llm_item = LLMProviderInfo(
llmId=llm.id,
- icon=llm.icon,
openaiBaseUrl=llm.baseUrl,
openaiApiKey=llm.apiKey,
modelName=llm.modelName,
@@ -143,20 +142,24 @@ class LLMManager:
if not llm:
err = f"[LLMManager] LLM {llm_id} 不存在"
raise ValueError(err)
- llm.icon = req.icon
llm.baseUrl = req.openai_base_url
llm.apiKey = req.openai_api_key
llm.modelName = req.model_name
llm.maxToken = req.max_tokens
+ llm.provider = req.provider
+ llm.ctxLength = req.ctx_length
+ llm.extraConfig = req.extra_data or {}
await session.commit()
else:
llm = LLMData(
id=llm_id,
- icon=req.icon,
baseUrl=req.openai_base_url,
apiKey=req.openai_api_key,
modelName=req.model_name,
maxToken=req.max_tokens,
+ provider=req.provider,
+ ctxLength=req.ctx_length,
+ extraConfig=req.extra_data or {},
)
session.add(llm)
await session.commit()
@@ -178,9 +181,9 @@ class LLMManager:
)).one_or_none()
if not llm:
err = f"[LLMManager] LLM {llm_id} 不存在"
- else:
- await session.delete(llm)
- await session.commit()
+ raise ValueError(err)
+ await session.delete(llm)
+ await session.commit()
async with postgres.session() as session:
# 清除所有FunctionLLM的引用
diff --git a/apps/services/service.py b/apps/services/service.py
index 9681daf5d3a0c34779ec0c947542a23d38ca629e..0a4c792bacc5341ad2075767df198257ac480137 100644
--- a/apps/services/service.py
+++ b/apps/services/service.py
@@ -25,7 +25,7 @@ from apps.schemas.flow import (
ServiceApiConfig,
ServiceMetadata,
)
-from apps.schemas.response_data import ServiceApiData, ServiceCardItem
+from apps.schemas.service import ServiceApiData, ServiceCardItem
logger = logging.getLogger(__name__)
diff --git a/apps/services/task.py b/apps/services/task.py
index ee794a6278b3b201d33d9f7803d94316abe52209..b7b8b508510328f7c9ae1b4234d161f30676d1af 100644
--- a/apps/services/task.py
+++ b/apps/services/task.py
@@ -4,7 +4,7 @@
import logging
import uuid
-from sqlalchemy import and_, delete, select, update
+from sqlalchemy import and_, delete, select
from apps.common.postgres import postgres
from apps.models.conversation import Conversation
diff --git a/apps/templates/generate_llm_operator_config.py b/apps/templates/generate_llm_operator_config.py
deleted file mode 100644
index f3b6dda71522e5acd88c08dbf5d9a80713cfd63e..0000000000000000000000000000000000000000
--- a/apps/templates/generate_llm_operator_config.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
-"""生成大模型操作符配置文件"""
-
-llm_provider_dict={
- "baichuan":{
- "provider":"baichuan",
- "url":"https://api.baichuan-ai.com/v1",
- "description":"百川大模型平台",
- "icon":"",
- },
- "modelscope":{
- "provider":"modelscope",
- "url":None,
- "description":"基于魔塔部署的本地大模型服务",
- "icon":"",
- },
- "ollama":{
- "provider":"ollama",
- "url":None,
- "description":"基于Ollama部署的本地大模型服务",
- "icon":"",
- },
- "openai":{
- "provider":"openai",
- "url":"https://api.openai.com/v1",
- "description":"OpenAI大模型平台",
- "icon":"",
- },
- "qwen":{
- "provider":"qwen",
- "url":"https://dashscope.aliyuncs.com/compatible-mode/v1",
- "description":"阿里百炼大模型平台",
- "icon":"",
- },
- "spark":{
- "provider":"spark",
- "url":"https://spark-api-open.xf-yun.com/v1",
- "description":"讯飞星火大模型平台",
- "icon":"",
- },
- "vllm":{
- "provider":"vllm",
- "url":None,
- "description":"基于VLLM部署的本地大模型服务",
- "icon":"",
- },
- "wenxin":{
- "provider":"wenxin",
- "url":"https://qianfan.baidubce.com/v2",
- "description":"百度文心大模型平台",
- "icon":"",
- },
-}
diff --git a/apps/templates/llm_provider_icon/ollama.svg b/apps/templates/llm_provider_icon/ollama.svg
deleted file mode 100644
index cc887e3dcfd2260c136e807a84909892dc139fc3..0000000000000000000000000000000000000000
--- a/apps/templates/llm_provider_icon/ollama.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/apps/templates/llm_provider_icon/openai.svg b/apps/templates/llm_provider_icon/openai.svg
deleted file mode 100644
index 50d94d6c10850b193390316ae8479569227c5e10..0000000000000000000000000000000000000000
--- a/apps/templates/llm_provider_icon/openai.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/apps/templates/llm_provider_icon/vllm.svg b/apps/templates/llm_provider_icon/vllm.svg
deleted file mode 100644
index 54acc3de2d23ab2bc107f660114105988090f696..0000000000000000000000000000000000000000
--- a/apps/templates/llm_provider_icon/vllm.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 24a9307cc189f0bcdbc2bc79706d7d0b3d7150a6..857fa8a39a3d28b9c0e758111818194f65a3da9f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,6 +31,7 @@ dependencies = [
"tiktoken==0.9.0",
"toml==0.10.2",
"uvicorn==0.34.0",
+ "xmltodict>=1.0.0",
]
[[tool.uv.index]]