From 8c5732b6973453bb9157401c09ac66935f31c3f4 Mon Sep 17 00:00:00 2001 From: bussyjd Date: Sat, 28 Feb 2026 00:52:08 +0400 Subject: [PATCH] feat(ollama): make tool_call interception configurable per-provider OllamaProvider hardcodes tool_call: True for all models, causing llmspy to intercept tool_calls and execute them server-side via g_exec_tool(). When llmspy is used as a proxy (e.g. for OpenClaw), the client needs tool_calls returned in the response, not executed by llmspy. Add tool_call_enabled attribute to OllamaProvider, sourced from providers.json kwargs (defaults to True for backward compatibility). Setting "tool_call": false in providers.json makes tool_calls pass through to the client untouched. --- llms/main.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/llms/main.py b/llms/main.py index d58339d..0b78ff6 100755 --- a/llms/main.py +++ b/llms/main.py @@ -1353,6 +1353,9 @@ def __init__(self, **kwargs): super().__init__(**kwargs) # Ollama's OpenAI-compatible endpoint is at /v1/chat/completions self.chat_url = f"{self.api}/v1/chat/completions" + # Allow disabling server-side tool execution via providers.json + # When False, tool_calls pass through to the client untouched + self.tool_call_enabled = kwargs.get("tool_call", True) async def load(self): if not self.models: @@ -1400,7 +1403,7 @@ async def load_models(self): "id": k, "name": v.replace(":", " "), "modalities": {"input": ["text"], "output": ["text"]}, - "tool_call": True, + "tool_call": self.tool_call_enabled, "cost": { "input": 0, "output": 0,