diff --git a/scripts/deploy/3-install-server/init_config.sh b/scripts/deploy/3-install-server/init_config.sh
index 9dbd0230e7e424fe4b50118f35935784347d002c..d8cb0a909b071c02b542b662dd3d65f526f00478 100644
--- a/scripts/deploy/3-install-server/init_config.sh
+++ b/scripts/deploy/3-install-server/init_config.sh
@@ -604,9 +604,9 @@ install_framework() {
 
         # 追加新的配置块到文件末尾
         cat <<EOF >>"$framework_file"
-
 [no_auth]
 enable = true
+
 EOF
     fi
 fi
diff --git a/scripts/deploy/5-resource/config.toml b/scripts/deploy/5-resource/config.toml
index 4683e99a37f09128d47f10029b887df30398991c..6a29a4c3facd40d9513cdb0d692cf447e6b3b950 100644
--- a/scripts/deploy/5-resource/config.toml
+++ b/scripts/deploy/5-resource/config.toml
@@ -5,15 +5,16 @@ data_dir = '/opt/copilot'
 
 [login]
 provider = 'authhub'
+
 [login.settings]
-host = 'http://76.53.17.51:8000'
+host = ''
 host_inner = 'http://127.0.0.1:11120'
-login_api = 'http://76.53.17.51:8080/api/auth/login'
+login_api = ''
 app_id = 'ZqMwXAjOAV0GhiTghERAahuR'
 app_secret = 'g6QliWAQ89aQJ6qHtKiMuqJubxF28Juw7IGF46GRjb9Ap2g0'
 
 [fastapi]
-domain = '76.53.17.51'
+domain = ''
 
 [security]
 half_key1 = 'jtbDsFPxCv1cjXCKwzWU'
@@ -23,9 +24,9 @@ jwt_key = 'MF0DXTp0N9Ky9rscrwHz'
 
 [embedding]
 type = 'openai'
-endpoint = 'http://10.50.88.44:11434/v1'
-api_key = 'sk-123456'
-model = 'bge-m3'
+endpoint = ''
+api_key = ''
+model = ''
 
 [rag]
 rag_service = 'http://127.0.0.1:9988'
@@ -44,23 +45,23 @@ secret_key = 'ZVzc6xJr3B7HsEUibVBh'
 secure = false
 
 [llm]
-endpoint = "http://10.50.88.44:11434/v1"
-key = 'sk-123456'
-model = 'deepseek-llm-7b-chat'
+endpoint = ''
+key = ''
+model = ''
 max_tokens = 8192
 temperature = 0.7
 
 [function_call]
-backend = 'ollama'
-endpoint = 'http://10.50.88.44:11434'
-model = 'deepseek-llm-7b-chat'
-api_key = 'sk-123456'
+backend = 'function_call'
+endpoint = ''
+model = ''
+api_key = ''
 max_tokens = 8192
 temperature = 0.7
 
 [check]
 enable = false
-words_list = ""
+words_list = ''
 
 [extra]
-sql_url = ''
\ No newline at end of file
+sql_url = ''
diff --git a/scripts/deploy/deploy.sh b/scripts/deploy/deploy.sh
index fc933780aa235aa47fd29615c8c595d5f260eded..8baccb063fb069be295f020dae0ce30d2f4a0879 100644
--- a/scripts/deploy/deploy.sh
+++ b/scripts/deploy/deploy.sh
@@ -306,9 +306,9 @@ agent_manager() {
     fi
 
     # 安装 MCP 服务
-    3-install-server/install_mcpserver.sh
+    ./3-install-server/install_mcpserver.sh
     # 初始化 MCP 服务
-    3-install-server/init_mcpserver.sh || {
+    ./3-install-server/init_mcpserver.sh || {
         echo -e "\n${COLOR_ERROR} 初始化 Agent 失败,请检查 MCP 服务是否可用,使用 Agent 初始化工具创建 Agent,详见部署文档...${COLOR_RESET}"
         exit 1
     }
diff --git a/src/app/deployment/models.py b/src/app/deployment/models.py
index 43b9514337a8cb7eb265389fa28ae14f312b35d8..82c62719a7a8d87874944b91febf3d13e754eab7 100644
--- a/src/app/deployment/models.py
+++ b/src/app/deployment/models.py
@@ -4,6 +4,8 @@
 定义部署过程中需要的配置项数据结构。
 """
 
+from __future__ import annotations
+
 from dataclasses import dataclass, field
 
 # 常量定义
@@ -79,7 +81,7 @@ class DeploymentConfig:
         # 验证 LLM 字段
         errors.extend(self._validate_llm_fields())
 
-        # 验证 Embedding 字段
+        # 验证 Embedding 字段(根据部署模式决定是否必须)
         errors.extend(self._validate_embedding_fields())
 
         # 验证数值范围
@@ -98,7 +100,8 @@ class DeploymentConfig:
             tuple[bool, str, dict]: (是否验证成功, 消息, 验证详细信息)
 
         """
-        from .validators import APIValidator
+        # 懒导入以避免循环导入
+        from .validators import APIValidator  # noqa: PLC0415
 
         # 检查必要字段是否完整
         if not (self.llm.endpoint.strip() and self.llm.api_key.strip() and self.llm.model.strip()):
@@ -125,7 +128,8 @@ class DeploymentConfig:
             tuple[bool, str, dict]: (是否验证成功, 消息, 验证详细信息)
 
         """
-        from .validators import APIValidator
+        # 懒导入以避免循环导入
+        from .validators import APIValidator  # noqa: PLC0415
 
         # 检查必要字段是否完整
         if not (self.embedding.endpoint.strip() and self.embedding.api_key.strip() and self.embedding.model.strip()):
@@ -162,12 +166,43 @@ class DeploymentConfig:
     def _validate_embedding_fields(self) -> list[str]:
         """验证 Embedding 配置字段"""
         errors = []
-        if not self.embedding.endpoint.strip():
-            errors.append("Embedding API 端点不能为空")
-        if not self.embedding.api_key.strip():
-            errors.append("Embedding API 密钥不能为空")
-        if not self.embedding.model.strip():
-            errors.append("Embedding 模型名称不能为空")
+
+        # 检查是否有任何 Embedding 字段已填写
+        has_embedding_config = any([
+            self.embedding.endpoint.strip(),
+            self.embedding.api_key.strip(),
+            self.embedding.model.strip(),
+        ])
+
+        # 轻量部署模式下,Embedding 配置是可选的
+        if self.deployment_mode == "light":
+            # 如果用户填了任何 Embedding 字段,则所有字段都必须完整
+            if has_embedding_config:
+                if not self.embedding.endpoint.strip():
+                    errors.append(
+                        "Embedding API 端点不能为空"
+                        "(轻量部署模式下,如果填写 Embedding 配置,所有字段都必须完整)",
+                    )
+                if not self.embedding.api_key.strip():
+                    errors.append(
+                        "Embedding API 密钥不能为空"
+                        "(轻量部署模式下,如果填写 Embedding 配置,所有字段都必须完整)",
+                    )
+                if not self.embedding.model.strip():
+                    errors.append(
+                        "Embedding 模型名称不能为空"
+                        "(轻量部署模式下,如果填写 Embedding 配置,所有字段都必须完整)",
+                    )
+            # 如果没有填写,则跳过验证
+        else:
+            # 全量部署模式下,Embedding 配置是必需的
+            if not self.embedding.endpoint.strip():
+                errors.append("Embedding API 端点不能为空")
+            if not self.embedding.api_key.strip():
+                errors.append("Embedding API 密钥不能为空")
+            if not self.embedding.model.strip():
+                errors.append("Embedding 模型名称不能为空")
+
         return errors
 
     def _validate_numeric_fields(self) -> list[str]:
diff --git a/src/app/deployment/service.py b/src/app/deployment/service.py
index 2668d098739fe52bc8b3aa32916372ef2254c082..cf5ef3de0e2b4ba1f2ec293b3a0a187d8b29a5c4 100644
--- a/src/app/deployment/service.py
+++ b/src/app/deployment/service.py
@@ -124,7 +124,9 @@ class DeploymentResourceManager:
         # 更新 Embedding 配置
         if "embedding" in toml_data:
             toml_data["embedding"]["type"] = config.embedding.type
+            toml_data["embedding"]["endpoint"] = config.embedding.endpoint
             toml_data["embedding"]["api_key"] = config.embedding.api_key
+            toml_data["embedding"]["model"] = config.embedding.model
 
         # 将更新后的数据转换回 TOML 格式
         return toml.dumps(toml_data)
diff --git a/src/app/deployment/ui.py b/src/app/deployment/ui.py
index ed0da57d6587fe0437987a84d2d38d3c3644761f..06f1ce962a89897cd0af3967dd66390207b8c280 100644
--- a/src/app/deployment/ui.py
+++ b/src/app/deployment/ui.py
@@ -473,6 +473,13 @@ class DeploymentConfigScreen(ModalScreen[bool]):
         with Vertical(classes="embedding-config-container"):
             yield Static("嵌入模型配置", classes="form-label")
 
+            # 添加轻量部署说明
+            yield Static(
+                "[dim]轻量部署模式下,Embedding 配置为可选项。[/dim]",
+                id="embedding_mode_hint",
+                classes="form-input",
+            )
+
             with Horizontal(classes="form-row"):
                 yield Label("API 端点:", classes="form-label")
                 yield Input(
@@ -506,7 +513,7 @@ class DeploymentConfigScreen(ModalScreen[bool]):
     async def on_deploy_button_pressed(self) -> None:
         """处理部署按钮点击"""
         if self._collect_config():
-            # 验证配置
+            # 基础配置验证
             is_valid, errors = self.config.validate()
             if not is_valid:
                 await self.app.push_screen(
@@ -514,8 +521,69 @@ class DeploymentConfigScreen(ModalScreen[bool]):
                 )
                 return
 
-            # 推送部署进度屏幕,不再自动退出应用
-            # 部署进度屏幕会根据用户选择决定是否退出或返回配置
+            # LLM function call 能力验证
+            llm_valid, llm_message, llm_info = await self.config.validate_llm_connectivity()
+            if not llm_valid:
+                await self.app.push_screen(
+                    ErrorMessageScreen(
+                        "LLM 配置验证失败",
+                        [f"LLM 配置无效:{llm_message}"],
+                    ),
+                )
+                return
+
+            # 检查 LLM 是否支持 function call
+            if not llm_info.get("supports_function_call", False):
+                await self.app.push_screen(
+                    ErrorMessageScreen(
+                        "LLM 功能不满足要求",
+                        [
+                            "所选的 LLM 模型不支持 function call 功能,无法继续部署。",
+                            "请选择支持 function call 的模型(如 OpenAI GPT 系列、通义千问、DeepSeek 等)。",
+                        ],
+                    ),
+                )
+                return
+
+            # 轻量部署模式下的 Embedding 验证
+            if self.config.deployment_mode == "light":
+                # 检查是否填写了 Embedding 配置
+                has_embedding = any(
+                    [
+                        self.config.embedding.endpoint.strip(),
+                        self.config.embedding.api_key.strip(),
+                        self.config.embedding.model.strip(),
+                    ],
+                )
+
+                if has_embedding:
+                    # 如果填了 Embedding 配置,需要验证连通性
+                    embed_valid, embed_message, _ = await self.config.validate_embedding_connectivity()
+                    if not embed_valid:
+                        await self.app.push_screen(
+                            ErrorMessageScreen(
+                                "Embedding 配置验证失败",
+                                [
+                                    f"Embedding 配置无效:{embed_message}",
+                                    "轻量部署模式下,如果填写了 Embedding 配置,必须确保配置正确。",
+                                    "您可以选择清空 Embedding 配置字段来跳过此验证。",
+                                ],
+                            ),
+                        )
+                        return
+            else:
+                # 全量部署模式下,必须验证 Embedding
+                embed_valid, embed_message, _ = await self.config.validate_embedding_connectivity()
+                if not embed_valid:
+                    await self.app.push_screen(
+                        ErrorMessageScreen(
+                            "Embedding 配置验证失败",
+                            [f"Embedding 配置无效:{embed_message}"],
+                        ),
+                    )
+                    return
+
+            # 所有验证通过,开始部署
             await self.app.push_screen(DeploymentProgressScreen(self.config))
 
     @on(Button.Pressed, "#cancel")
@@ -530,17 +598,38 @@ class DeploymentConfigScreen(ModalScreen[bool]):
         try:
             btn = self.query_one("#deployment_mode_btn", Button)
             desc = self.query_one("#deployment_mode_desc", Static)
+            # 如果当前为轻量,则切换到全量
             if btn.label and "轻量" in str(btn.label):
                 btn.label = "全量部署"
                 desc.update("全量部署:部署框架服务、Web 界面和 RAG 组件,需手动配置 Agent。")
+                # 更新 Embedding 配置提示
+                self._update_embedding_hint(is_light_mode=False)
             else:
                 btn.label = "轻量部署"
                 desc.update("轻量部署:仅部署框架服务,自动初始化 Agent。")
+                # 更新 Embedding 配置提示
+                self._update_embedding_hint(is_light_mode=True)
         except (AttributeError, ValueError):
             # 查询失败或属性错误时忽略
             return
 
+    def _update_embedding_hint(self, *, is_light_mode: bool) -> None:
+        """更新 Embedding 配置提示信息"""
+        try:
+            hint_widget = self.query_one("#embedding_mode_hint", Static)
+            if is_light_mode:
+                hint_widget.update(
+                    "[dim]轻量部署模式下,Embedding 配置为可选项。如果不填写,将跳过 RAG 功能。[/dim]",
+                )
+            else:
+                hint_widget.update(
+                    "[dim]全量部署模式下,Embedding 配置为必填项,用于支持 RAG 功能。[/dim]",
+                )
+        except (AttributeError, ValueError):
+            # 如果控件不存在,忽略错误
+            pass
+
     @on(Input.Changed, "#llm_endpoint, #llm_api_key, #llm_model")
     async def on_llm_field_changed(self, event: Input.Changed) -> None:
         """处理 LLM 字段变化,检查是否需要自动验证"""
@@ -581,7 +670,23 @@ class DeploymentConfigScreen(ModalScreen[bool]):
             endpoint = self.query_one("#embedding_endpoint", Input).value.strip()
             api_key = self.query_one("#embedding_api_key", Input).value.strip()
             model = self.query_one("#embedding_model", Input).value.strip()
+
+            # 检查部署模式
+            try:
+                btn = self.query_one("#deployment_mode_btn", Button)
+                label = str(btn.label) if btn.label is not None else ""
+                is_light_mode = "轻量" in label
+            except (AttributeError, ValueError):
+                is_light_mode = True  # 默认为轻量模式
+
+            # 轻量模式下,只有在用户填写了 Embedding 字段时才验证
+            if is_light_mode:
+                has_embedding_config = bool(endpoint or api_key or model)
+                return has_embedding_config and bool(endpoint and api_key and model)
+
+            # 全量模式下,必须验证
             return bool(endpoint and api_key and model)
+
         except (AttributeError, ValueError):
             return False
 
@@ -616,12 +721,17 @@ class DeploymentConfigScreen(ModalScreen[bool]):
 
         # 更新验证状态
         if is_valid:
-            status_widget.update(f"[green]✓ {message}[/green]")
             # 检查是否支持 function_call
-            if info.get("supports_function_call"):
+            supports_function_call = info.get("supports_function_call", False)
+            if supports_function_call:
+                status_widget.update(f"[green]✓ {message}[/green]")
                 self.notify("LLM 验证成功,支持 function_call 功能", severity="information")
             else:
-                self.notify("LLM 验证成功,但不支持 function_call 功能", severity="warning")
+                status_widget.update("[red]✗ 不支持 function_call[/red]")
+                self.notify(
+                    "LLM 验证失败:模型不支持 function_call 功能,无法用于部署。请选择支持 function_call 的模型。",
+                    severity="error",
+                )
         else:
             status_widget.update(f"[red]✗ {message}[/red]")
             self.notify(f"LLM 验证失败: {message}", severity="error")
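
Note on the validation change above (not part of the patch): the new `_validate_embedding_fields` rule makes the Embedding section optional in light mode but "all-or-nothing" once any field is filled, while full deployment still requires every field. Below is a minimal, self-contained sketch of that rule; `EmbeddingConfig` and `check_embedding` are simplified stand-ins for illustration only, not the project's actual `DeploymentConfig` API.

```python
from dataclasses import dataclass


@dataclass
class EmbeddingConfig:
    """Simplified stand-in for the embedding fields in the deployment config."""

    endpoint: str = ""
    api_key: str = ""
    model: str = ""


def check_embedding(cfg: EmbeddingConfig, deployment_mode: str) -> list[str]:
    """Light mode: an empty section is allowed, a partial one is an error.

    Any other mode (e.g. full deployment): every field is required.
    """
    values = {
        "endpoint": cfg.endpoint.strip(),
        "api_key": cfg.api_key.strip(),
        "model": cfg.model.strip(),
    }
    # Light mode with nothing filled in: skip Embedding/RAG validation entirely.
    if deployment_mode == "light" and not any(values.values()):
        return []
    # Otherwise every field must be present.
    return [f"Embedding {name} must not be empty" for name, value in values.items() if not value]


# Expected behaviour under this rule:
assert check_embedding(EmbeddingConfig(), "light") == []                          # optional in light mode
assert len(check_embedding(EmbeddingConfig(endpoint="http://x"), "light")) == 2   # partial config -> errors
assert len(check_embedding(EmbeddingConfig(), "full")) == 3                       # required in full mode
```

The deploy-button flow in `ui.py` follows the same rule before the connectivity checks: in light mode an entirely empty Embedding section skips `validate_embedding_connectivity()`, otherwise the section must validate successfully before deployment starts.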