# AI logic with Claude API integration, tool execution, and system prompts.
"""
|
|
Egregore Brain - Conversation processing with tool use loop
|
|
"""
|
|
|
|
import json
|
|
import uuid
|
|
from typing import Any
|
|
|
|
import anthropic
|
|
|
|
from tools import TOOLS, execute_tool
|
|
from prompts import get_system_prompt
|
|
|
|
|
|
def extract_embedded_tool_calls(text: str) -> tuple[list, str]:
    """
    Detect JSON-encoded tool calls embedded in a text response.

    Some model outputs emit a JSON array of content blocks as plain text
    instead of using native tool_use blocks. If *text* is such an array,
    the tool_use entries are extracted from it.

    Returns:
        (tool_calls, remaining_text) — the parsed tool_use dicts and ""
        when any were found, otherwise ([], text) unchanged.
    """
    candidate = text.strip() if text else ""
    if not candidate.startswith('['):
        return [], text

    try:
        blocks = json.loads(candidate)
    except (json.JSONDecodeError, TypeError):
        return [], text

    if not (isinstance(blocks, list) and blocks):
        return [], text

    # Only treat it as an embedded tool-call payload when every element
    # looks like a Claude content block.
    known_types = ('tool_use', 'tool_result', 'text')
    if any(not (isinstance(b, dict) and b.get('type') in known_types) for b in blocks):
        return [], text

    calls = [b for b in blocks if b.get('type') == 'tool_use']
    if calls:
        return calls, ""
    return [], text
|
|
|
|
|
|
def serialize_content_blocks(blocks) -> list:
    """Convert Claude API content blocks into JSON-serializable dicts.

    SDK objects (anything with a ``type`` attribute) are translated; text
    blocks are additionally scanned for embedded JSON tool calls; plain
    dicts pass through unchanged. Unrecognized items are dropped.
    """
    serialized = []
    for item in blocks:
        if hasattr(item, 'type'):
            if item.type == "tool_use":
                serialized.append({
                    "type": "tool_use",
                    "id": item.id,
                    "name": item.name,
                    "input": item.input,
                })
            elif item.type == "text":
                tools, leftover = extract_embedded_tool_calls(item.text)
                if tools:
                    # Model emitted tool calls as JSON text; surface them
                    # as real tool_use dicts.
                    serialized.extend(tools)
                elif leftover:
                    serialized.append({"type": "text", "content": leftover})
        elif isinstance(item, dict):
            serialized.append(item)
    return serialized
|
|
|
|
|
|
def extract_text_from_blocks(blocks) -> str:
    """Join the plain-text parts of *blocks* (for notifications etc)."""
    pieces = []
    for item in blocks:
        if isinstance(item, dict):
            # Serialized form uses the "content" key for text.
            if item.get("type") == "text":
                pieces.append(item.get("content", ""))
        elif getattr(item, 'type', None) == "text":
            # SDK text blocks expose the text via the .text attribute.
            pieces.append(item.text)
    return "\n".join(pieces)
|
|
|
|
|
|
async def process_conversation(
    client: anthropic.AsyncAnthropic,
    model: str,
    history: list[dict],
    max_iterations: int = 10
) -> list[dict]:
    """
    Process a conversation with tool use loop.

    Repeatedly calls the Claude Messages API; whenever the response contains
    tool calls (native tool_use blocks, or tool calls embedded as JSON text),
    executes them via execute_tool, appends the assistant turn and tool
    results to `history`, and calls the API again. Stops when a response
    contains no tool calls, or after `max_iterations` rounds.

    Note: `history` is mutated in place (assistant turns and tool-result
    user turns are appended).

    Args:
        client: Async Anthropic client
        model: Model ID to use
        history: Conversation history in Claude API format
        max_iterations: Maximum tool use iterations

    Returns:
        List of response blocks (text, tool_use, tool_result) accumulated
        across all iterations, as JSON-serializable dicts.
    """
    system_prompt = await get_system_prompt()
    all_response_blocks = []

    for _ in range(max_iterations):
        response = await client.messages.create(
            model=model,
            max_tokens=4096,
            system=system_prompt,
            messages=history,
            tools=TOOLS
        )

        # Native tool_use blocks from the API, and tool calls the model
        # emitted as JSON inside a text block, are collected separately.
        tool_uses = []
        embedded_tool_calls = []

        for block in response.content:
            if block.type == "tool_use":
                tool_uses.append(block)
                all_response_blocks.append({
                    "type": "tool_use",
                    "id": block.id,
                    "name": block.name,
                    "input": block.input
                })
            elif block.type == "text":
                embedded, remaining = extract_embedded_tool_calls(block.text)
                if embedded:
                    embedded_tool_calls.extend(embedded)
                    for tool in embedded:
                        all_response_blocks.append(tool)
                elif remaining:
                    all_response_blocks.append({
                        "type": "text",
                        "content": remaining
                    })

        # No tool calls of either kind: the model is done responding.
        if not tool_uses and not embedded_tool_calls:
            break

        # Handle embedded tool calls (model misbehavior - output JSON as text)
        # NOTE(review): when a response contains BOTH native tool_use blocks
        # and embedded calls, the embedded ones are recorded in
        # all_response_blocks above but never executed — confirm intended.
        if embedded_tool_calls and not tool_uses:
            embedded_results = []
            for idx, tool_call in enumerate(embedded_tool_calls):
                # Embedded calls may lack an id; synthesize a stable one.
                tool_id = tool_call.get("id", f"embedded_{idx}")
                tool_name = tool_call.get("name")
                tool_input = tool_call.get("input", {})

                if tool_name:
                    result = await execute_tool(tool_name, tool_input)
                    embedded_results.append({
                        "type": "tool_result",
                        "tool_use_id": tool_id,
                        "content": result
                    })
                    # The caller-facing copy also records which tool ran.
                    all_response_blocks.append({
                        "type": "tool_result",
                        "tool_use_id": tool_id,
                        "tool_name": tool_name,
                        "content": result
                    })

            if embedded_results:
                history.append({"role": "assistant", "content": response.content})
                history.append({"role": "user", "content": embedded_results})
                continue
            # NOTE(review): if every embedded call lacked a name, execution
            # falls through to the native path below with empty tool_uses,
            # appending an empty tool_results turn to history — verify this
            # is acceptable to the API.

        # Execute tools and add results to history
        tool_results = []
        for tool_use in tool_uses:
            result = await execute_tool(tool_use.name, tool_use.input)
            tool_results.append({
                "type": "tool_result",
                "tool_use_id": tool_use.id,
                "content": result
            })
            all_response_blocks.append({
                "type": "tool_result",
                "tool_use_id": tool_use.id,
                "tool_name": tool_use.name,
                "content": result
            })

        history.append({"role": "assistant", "content": response.content})
        history.append({"role": "user", "content": tool_results})

    return all_response_blocks
|