diff --git a/apps/llm/function.py b/apps/llm/function.py
index 4aac7e1b8bd17f0dd820896a14d180b511bd63b6..89de7fe3526288c03fe535dd456c220ee4423e60 100644
--- a/apps/llm/function.py
+++ b/apps/llm/function.py
@@ -42,6 +42,7 @@ class FunctionLLM:
         self._params = {
             "model": self._config.model,
             "messages": [],
+            "timeout": 10
         }
 
         if self._config.backend == "ollama":
diff --git a/apps/llm/reasoning.py b/apps/llm/reasoning.py
index fdb36fc05adf38920bcce0d962b6aafc21e44b71..bc84849047e746b69629a99926686e1636137a27 100644
--- a/apps/llm/reasoning.py
+++ b/apps/llm/reasoning.py
@@ -145,6 +145,7 @@ class ReasoningLLM:
             temperature=temperature or self._config.temperature,
             stream=True,
             stream_options={"include_usage": True},
+            timeout=10
         )  # type: ignore[]
 
     async def call(  # noqa: C901, PLR0912, PLR0913