From ad081d412c13d46233a614267dd8cae6cf3433ed Mon Sep 17 00:00:00 2001
From: dullfig
Date: Sun, 11 Jan 2026 14:05:53 -0800
Subject: [PATCH] Fix platform.complete() to use correct LLM router API

- Import `complete` not `generate` from agentserver.llm
- Pass `model` parameter required by router
- Return response.content (string) not response object

Co-Authored-By: Claude Opus 4.5
---
 agentserver/platform/llm_api.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/agentserver/platform/llm_api.py b/agentserver/platform/llm_api.py
index 0ba46e8..396f2ff 100644
--- a/agentserver/platform/llm_api.py
+++ b/agentserver/platform/llm_api.py
@@ -118,9 +118,13 @@ async def complete(
 
     # Make LLM call via router
     try:
-        from agentserver.llm import generate
+        from agentserver.llm import complete as llm_complete
 
-        response = await generate(
+        # Use model from kwargs or default
+        model = kwargs.pop("model", "grok-3-mini-beta")
+
+        response = await llm_complete(
+            model=model,
             messages=messages,
             temperature=temperature,
             max_tokens=max_tokens,
@@ -129,10 +133,10 @@
 
         logger.debug(
             f"platform.complete: agent={agent_name} thread={thread_id[:8]}... "
-            f"messages={len(messages)} response_len={len(response)}"
+            f"messages={len(messages)} response_len={len(response.content)}"
         )
 
-        return response
+        return response.content
 
     except Exception as e:
         logger.error(f"LLM call failed for {agent_name}: {e}")