diff --git a/src/app/tui.py b/src/app/tui.py
index 7af0c18b67eb809770ce762936bdec18d058b698..14bba5c3cba308e6e68fc5cb977c9b1eda4372e2 100644
--- a/src/app/tui.py
+++ b/src/app/tui.py
@@ -646,9 +646,9 @@ class IntelligentTerminal(App):
             "is_first_content": True,
             "received_any_content": False,
             "start_time": start_time,
-            "timeout_seconds": 1800.0,  # 30-minute timeout, consistent with the HTTP layer
+            "timeout_seconds": None,  # no overall timeout, to support very long-running tasks
             "last_content_time": start_time,
-            "no_content_timeout": 300.0,  # 5-minute no-content timeout
+            "no_content_timeout": 1800.0,  # 30-minute no-content timeout
         }

    async def _process_stream(
@@ -691,8 +691,8 @@ class IntelligentTerminal(App):
         output_container: Container,
     ) -> bool:
         """Check each timeout condition; return whether processing should stop."""
-        # Check the overall timeout
-        if current_time - stream_state["start_time"] > stream_state["timeout_seconds"]:
+        timeout_seconds = stream_state["timeout_seconds"]
+        if timeout_seconds is not None and current_time - stream_state["start_time"] > timeout_seconds:
             output_container.mount(OutputLine(_("Request timeout, processing stopped"), command=False))
             return True

@@ -1248,50 +1248,38 @@ class IntelligentTerminal(App):
         # Use the unified stream-state management, consistent with _handle_command_stream
         stream_state = self._init_stream_state()
-        timeout_seconds = 1800.0  # 30-minute timeout, consistent with the HTTP layer

         try:
-            # Wrap the whole stream-processing loop in asyncio.wait_for
-            async def _process_stream() -> bool:
-                async for content in llm_client.send_mcp_response(conversation_id, params=params):
-                    if not content.strip():
-                        continue
-
-                    stream_state["received_any_content"] = True
-                    current_time = asyncio.get_event_loop().time()
-
-                    # Update the time content was last received
-                    if content.strip():
-                        stream_state["last_content_time"] = current_time
-
-                    # Check timeouts
-                    if self._check_timeouts(current_time, stream_state, output_container):
-                        break
-
-                    # Determine whether this is LLM output
-                    tool_name, _cleaned_content = extract_mcp_tag(content)
-                    is_llm_output = tool_name is None
-
-                    # Process the content
-                    await self._process_stream_content(
-                        content,
-                        stream_state,
-                        output_container,
-                        is_llm_output=is_llm_output,
-                    )
-
-                    # Scroll to the bottom
-                    await self._scroll_to_end()
-
-                return stream_state["received_any_content"]
+            async for content in llm_client.send_mcp_response(conversation_id, params=params):
+                if not content.strip():
+                    continue
+
+                stream_state["received_any_content"] = True
+                current_time = asyncio.get_event_loop().time()
+
+                # Update the time content was last received
+                if content.strip():
+                    stream_state["last_content_time"] = current_time
+
+                # Check timeouts
+                if self._check_timeouts(current_time, stream_state, output_container):
+                    break
+
+                # Determine whether this is LLM output
+                tool_name, _cleaned_content = extract_mcp_tag(content)
+                is_llm_output = tool_name is None
+
+                # Process the content
+                await self._process_stream_content(
+                    content,
+                    stream_state,
+                    output_container,
+                    is_llm_output=is_llm_output,
+                )

-            # Run the stream processing with a timeout attached
-            return await asyncio.wait_for(_process_stream(), timeout=timeout_seconds)
+                # Scroll to the bottom
+                await self._scroll_to_end()

-        except TimeoutError:
-            output_container.mount(
-                OutputLine(_("⏱️ MCP response timeout ({seconds} seconds)").format(seconds=timeout_seconds)),
-            )
             return stream_state["received_any_content"]
         except asyncio.CancelledError:
             output_container.mount(OutputLine(_("🚫 MCP response cancelled")))
diff --git a/src/backend/hermes/services/http.py b/src/backend/hermes/services/http.py
index 0dce361703a69e391024bba80e0456725a06c5b0..e36ea9b02709521fcae27fa5bad69f8cfac89244 100644
--- a/src/backend/hermes/services/http.py
+++ b/src/backend/hermes/services/http.py
@@ -44,7 +44,7 @@ class HermesHttpManager:
         timeout = httpx.Timeout(
             connect=30.0,  # connect timeout: allow 30 seconds to establish a connection
-            read=1800.0,  # read timeout: support long-running SSE streams (30 minutes)
+            read=None,  # read timeout: unlimited, to support very long SSE streams
             write=30.0,  # write timeout
             pool=30.0,  # connection pool timeout
         )
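
Review note: with this change the overall deadline disappears at both layers (`timeout_seconds: None` in the TUI stream state, `read=None` in the backend `httpx.Timeout`), so liveness rests entirely on the per-chunk no-content watchdog. Below is a minimal, self-contained sketch of that pattern for reference. It requires `httpx`; `HERMES_TIMEOUT`, `fake_stream`, `consume`, and the 5-second window are illustrative stand-ins, not names from this PR. `httpx.Timeout` does accept `None` per phase to disable that individual timeout.

```python
# Sketch only (assumed names): the inactivity-watchdog pattern this PR moves
# to, with timeout checks running once per received chunk.
import asyncio
from collections.abc import AsyncIterator

import httpx

# httpx.Timeout accepts None per phase to disable that timeout; this mirrors
# the backend change, leaving only the read phase unbounded.
HERMES_TIMEOUT = httpx.Timeout(connect=30.0, read=None, write=30.0, pool=30.0)


async def fake_stream() -> AsyncIterator[str]:
    """Stand-in for llm_client.send_mcp_response(): yields a few chunks."""
    for chunk in ("hello", "", "world"):
        await asyncio.sleep(0.1)
        yield chunk


async def consume(no_content_timeout: float = 5.0) -> bool:
    """Drain the stream; stop if no substantive content arrives in time."""
    loop = asyncio.get_running_loop()
    last_content_time = loop.time()
    received_any = False
    async for content in fake_stream():
        now = loop.time()
        if content.strip():
            received_any = True
            last_content_time = now
        # Watchdog: only empty keep-alive chunks for too long -> give up.
        elif now - last_content_time > no_content_timeout:
            break
    return received_any


if __name__ == "__main__":
    print(asyncio.run(consume()))  # True: content arrived within the window
```

One caveat worth flagging: the watchdog (like `_check_timeouts` in the loop above) only runs when a chunk actually arrives, so with `read=None` a connection that goes completely silent blocks the `async for` indefinitely and no timeout ever fires.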