Skip to content

Agent API

The core agent implementation for PatchPal, providing the main interface for interacting with LLMs and executing tools.

Creating an Agent

patchpal.agent.create_agent(model_id='anthropic/claude-sonnet-4-5', custom_tools=None, enabled_tools=None, litellm_kwargs=None)

Create and return a PatchPal agent.

Parameters:

Name Type Description Default
model_id str

LiteLLM model identifier (default: anthropic/claude-sonnet-4-5)

'anthropic/claude-sonnet-4-5'
custom_tools Optional[List[Callable]]

Optional list of Python functions to use as custom tools. Each function should have type hints and a docstring.

None
enabled_tools Optional[List[str]]

Optional list of tool names to enable (whitelist). If provided, only these built-in tools will be available. Custom tools are always added. Takes precedence over PATCHPAL_ENABLED_TOOLS environment variable. Example: ["read_file", "edit_file", "run_shell"]

None
litellm_kwargs Optional[Dict[str, Any]]

Optional dict of extra parameters to pass to litellm.completion() (e.g., {"reasoning_effort": "high"} for reasoning models)

None

Returns:

Type Description
PatchPalAgent

A configured PatchPalAgent instance

Example

def calculator(x: int, y: int) -> str:
    '''Add two numbers.

    Args:
        x: First number
        y: Second number
    '''
    return str(x + y)

agent = create_agent(custom_tools=[calculator])
response = agent.run("What's 5 + 3?")

Limit to read-only tools

agent = create_agent(enabled_tools=["read_file", "read_lines", "code_structure"])

With reasoning model

agent = create_agent(
    model_id="ollama_chat/gpt-oss:120b",
    litellm_kwargs={"reasoning_effort": "high"},
)

Source code in patchpal/agent/function_calling.py
def create_agent(
    model_id: str = "anthropic/claude-sonnet-4-5",
    custom_tools: Optional[List[Callable]] = None,
    enabled_tools: Optional[List[str]] = None,
    litellm_kwargs: Optional[Dict[str, Any]] = None,
) -> PatchPalAgent:
    """Build and return a ready-to-use PatchPal agent.

    Args:
        model_id: LiteLLM model identifier (default: anthropic/claude-sonnet-4-5)
        custom_tools: Optional list of Python functions to expose as tools;
            each should carry type hints and a docstring so a schema can be
            derived for it.
        enabled_tools: Optional whitelist of built-in tool names. When given,
            only those built-ins are available (custom tools are always added).
            Takes precedence over the PATCHPAL_ENABLED_TOOLS environment
            variable. Example: ["read_file", "edit_file", "run_shell"]
        litellm_kwargs: Optional extra parameters forwarded to
            litellm.completion() (e.g. {"reasoning_effort": "high"} for
            reasoning models).

    Returns:
        A configured PatchPalAgent instance

    Example:
        def calculator(x: int, y: int) -> str:
            '''Add two numbers.

            Args:
                x: First number
                y: Second number
            '''
            return str(x + y)

        agent = create_agent(custom_tools=[calculator])
        response = agent.run("What's 5 + 3?")

        # Limit to read-only tools
        agent = create_agent(
            enabled_tools=["read_file", "read_lines", "code_structure"]
        )

        # With reasoning model
        agent = create_agent(
            model_id="ollama_chat/gpt-oss:120b",
            litellm_kwargs={"reasoning_effort": "high"}
        )
    """
    from patchpal.tools import reset_session_todos

    # Every new agent begins with a clean per-session todo list.
    reset_session_todos()

    agent = PatchPalAgent(
        model_id=model_id,
        custom_tools=custom_tools,
        enabled_tools=enabled_tools,
        litellm_kwargs=litellm_kwargs,
    )
    return agent

patchpal.agent.create_react_agent(model_id='ollama_chat/llama3.2', custom_tools=None, enabled_tools=None, litellm_kwargs=None, custom_instructions='')

Create and return a ReAct agent.

This agent uses text-based tool invocation instead of native function calling, making it compatible with models that don't support function calling.

Parameters:

Name Type Description Default
model_id str

LiteLLM model identifier (default: ollama_chat/llama3.2)

'ollama_chat/llama3.2'
custom_tools Optional[List[Callable]]

Optional list of Python functions to use as custom tools

None
enabled_tools Optional[List[str]]

Optional list of tool names to enable (whitelist)

None
litellm_kwargs Optional[Dict[str, Any]]

Optional dict of extra parameters for litellm.completion()

None
custom_instructions str

Optional custom instructions to prepend to system prompt

''

Returns:

Type Description
ReActAgent

A configured ReActAgent instance

Example

Basic usage with Ollama

agent = create_react_agent(model_id="ollama_chat/llama3.2")
response = agent.run("What files are in the src directory?")

With custom tools

def calculator(x: int, y: int) -> str:
    '''Add two numbers.'''
    return str(x + y)

agent = create_react_agent(
    model_id="ollama_chat/qwen2.5",
    custom_tools=[calculator],
)

Source code in patchpal/agent/react.py
def create_react_agent(
    model_id: str = "ollama_chat/llama3.2",
    custom_tools: Optional[List[Callable]] = None,
    enabled_tools: Optional[List[str]] = None,
    litellm_kwargs: Optional[Dict[str, Any]] = None,
    custom_instructions: str = "",
) -> ReActAgent:
    """Build and return a ReAct-style agent.

    Tools are invoked through a text protocol parsed out of the model's
    replies instead of native function calling, so this works with models
    that lack function-calling support.

    Args:
        model_id: LiteLLM model identifier (default: ollama_chat/llama3.2)
        custom_tools: Optional list of Python functions to use as custom tools
        enabled_tools: Optional list of tool names to enable (whitelist)
        litellm_kwargs: Optional dict of extra parameters for litellm.completion()
        custom_instructions: Optional custom instructions to prepend to system prompt

    Returns:
        A configured ReActAgent instance

    Example:
        # Basic usage with Ollama
        agent = create_react_agent(model_id="ollama_chat/llama3.2")
        response = agent.run("What files are in the src directory?")

        # With custom tools
        def calculator(x: int, y: int) -> str:
            '''Add two numbers.'''
            return str(x + y)

        agent = create_react_agent(
            model_id="ollama_chat/qwen2.5",
            custom_tools=[calculator]
        )
    """
    from patchpal.tools import reset_session_todos

    # A fresh agent starts with an empty per-session todo list.
    reset_session_todos()

    agent = ReActAgent(
        model_id=model_id,
        custom_tools=custom_tools,
        enabled_tools=enabled_tools,
        litellm_kwargs=litellm_kwargs,
        custom_instructions=custom_instructions,
    )
    return agent

ReAct Agent for Local Models

For local models that don't support native function calling, use create_react_agent() instead of create_agent(). See Local Models - ReAct Mode for details.

Agent Classes

PatchPalAgent (Function Calling)

patchpal.agent.PatchPalAgent(model_id='anthropic/claude-sonnet-4-5', custom_tools=None, enabled_tools=None, litellm_kwargs=None)

Simple agent that uses LiteLLM for tool calling.

Initialize the agent.

Parameters:

Name Type Description Default
model_id str

LiteLLM model identifier

'anthropic/claude-sonnet-4-5'
custom_tools Optional[List[Callable]]

Optional list of Python functions to add as tools

None
enabled_tools Optional[List[str]]

Optional list of tool names to enable (whitelist). If provided, only these tools will be available. Takes precedence over PATCHPAL_ENABLED_TOOLS environment variable.

None
litellm_kwargs Optional[Dict[str, Any]]

Optional dict of extra parameters to pass to litellm.completion() (e.g., {"reasoning_effort": "high"} for reasoning models)

None
Source code in patchpal/agent/function_calling.py
def __init__(
    self,
    model_id: str = "anthropic/claude-sonnet-4-5",
    custom_tools: Optional[List[Callable]] = None,
    enabled_tools: Optional[List[str]] = None,
    litellm_kwargs: Optional[Dict[str, Any]] = None,
):
    """Initialize the agent.

    Args:
        model_id: LiteLLM model identifier
        custom_tools: Optional list of Python functions to add as tools
        enabled_tools: Optional list of tool names to enable (whitelist). If provided,
                      only these tools will be available. Takes precedence over
                      PATCHPAL_ENABLED_TOOLS environment variable.
        litellm_kwargs: Optional dict of extra parameters to pass to litellm.completion()
                      (e.g., {"reasoning_effort": "high"} for reasoning models)

    Note:
        For ``bedrock/`` models this sets the process-global
        ``litellm.modify_params`` flag as a side effect.
    """
    # Store custom tools
    self.custom_tools = custom_tools or []
    # Map tool name -> callable so custom tools can be dispatched by name.
    self.custom_tool_funcs = {func.__name__: func for func in self.custom_tools}

    # Configure enabled tools (parameter takes precedence over environment variable)
    if enabled_tools is not None:
        self.enabled_tools = enabled_tools
    else:
        # PATCHPAL_ENABLED_TOOLS is a comma-separated whitelist of tool names.
        env_enabled = os.getenv("PATCHPAL_ENABLED_TOOLS")
        if env_enabled:
            self.enabled_tools = [t.strip() for t in env_enabled.split(",")]
        else:
            self.enabled_tools = None  # No filtering - all tools available

    # Convert ollama/ to ollama_chat/ for LiteLLM compatibility
    if model_id.startswith("ollama/"):
        model_id = model_id.replace("ollama/", "ollama_chat/", 1)

    self.model_id = _normalize_bedrock_model_id(model_id)

    # Initialize image handler for vision model support
    from patchpal.tools.image_handler import ImageHandler

    self.image_handler = ImageHandler(self.model_id)

    # Register Ollama models as supporting native function calling
    # LiteLLM defaults to JSON mode if not explicitly registered
    if self.model_id.startswith("ollama_chat/"):
        # Suppress verbose output from register_model
        # (it prints to stdout; we capture and discard that output).
        import sys
        from io import StringIO

        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            litellm.register_model(
                {"model_cost": {self.model_id: {"supports_function_calling": True}}}
            )
        finally:
            # Always restore stdout, even if register_model raises.
            sys.stdout = old_stdout

    # Set up Bedrock environment if needed
    if self.model_id.startswith("bedrock/"):
        _setup_bedrock_env()

    # Conversation history (list of message dicts)
    self.messages: List[Dict[str, Any]] = []

    # Initialize context manager
    # NOTE(review): SYSTEM_PROMPT is a module-level constant defined
    # elsewhere in this module.
    self.context_manager = ContextManager(self.model_id, SYSTEM_PROMPT)

    # Check if auto-compaction is enabled (default: True)
    self.enable_auto_compact = not config.DISABLE_AUTOCOMPACT

    # Track last compaction to prevent compaction loops
    self._last_compaction_message_count = 0

    # Track cumulative token usage across all LLM calls
    self.total_llm_calls = 0
    self.cumulative_input_tokens = 0
    self.cumulative_output_tokens = 0

    # Track cache-related tokens (for Anthropic/Bedrock models with prompt caching)
    self.cumulative_cache_creation_tokens = 0
    self.cumulative_cache_read_tokens = 0

    # Track OpenAI cache tokens (prompt_tokens_details.cached_tokens)
    self.cumulative_openai_cached_tokens = 0

    # Track cumulative costs across all LLM calls
    self.cumulative_cost = 0.0
    self.last_message_cost = 0.0

    # LiteLLM settings for models that need parameter dropping
    self.litellm_kwargs = {}
    if self.model_id.startswith("bedrock/"):
        self.litellm_kwargs["drop_params"] = True
        # Configure LiteLLM to handle Bedrock's strict message alternation requirement
        # This must be set globally, not as a completion parameter
        litellm.modify_params = True
    elif self.model_id.startswith("openai/") and os.getenv("OPENAI_API_BASE"):
        # Custom OpenAI-compatible servers (vLLM, etc.) often don't support all parameters
        self.litellm_kwargs["drop_params"] = True

    # Merge in any user-provided litellm_kwargs
    # (user values override the defaults set above on key collision).
    if litellm_kwargs:
        self.litellm_kwargs.update(litellm_kwargs)

    # Load MEMORY.md if it exists and has non-template content
    self._load_project_memory()
run(user_message, max_iterations=100)

Run the agent on a user message.

Parameters:

Name Type Description Default
user_message str

The user's request

required
max_iterations int

Maximum number of agent iterations (default: 100)

100

Returns:

Type Description
str

The agent's final response

Source code in patchpal/agent/function_calling.py
def run(self, user_message: str, max_iterations: int = 100) -> str:
    """Run the agent on a user message.

    Args:
        user_message: The user's request
        max_iterations: Maximum number of agent iterations (default: 100)

    Returns:
        The agent's final response
    """
    # Record the request in the conversation history.
    self.messages.append({"role": "user", "content": user_message})

    # Compact the context up front, never mid-execution, so tool results
    # produced during this request are never lost to compaction.
    should_compact = self.enable_auto_compact and self.context_manager.needs_compaction(
        self.messages
    )
    if should_compact:
        self._perform_auto_compaction()

    try:
        return self._run_agent_loop(max_iterations)
    except KeyboardInterrupt:
        # Drop any half-finished tool exchange so the conversation state
        # stays consistent, then let the CLI handle the interrupt.
        self._cleanup_interrupted_state()
        raise
ReActAgent (Text-Based Tool Calling)

patchpal.agent.ReActAgent(model_id='ollama_chat/llama3.2', custom_tools=None, enabled_tools=None, litellm_kwargs=None, custom_instructions='')

Agent that uses ReAct pattern instead of native function calling.

Initialize ReAct agent.

Parameters:

Name Type Description Default
model_id str

LiteLLM model identifier

'ollama_chat/llama3.2'
custom_tools Optional[List[Callable]]

Optional list of custom Python functions to add as tools

None
enabled_tools Optional[List[str]]

Optional list of tool names to enable (whitelist)

None
litellm_kwargs Optional[Dict[str, Any]]

Optional dict of extra parameters for litellm.completion()

None
custom_instructions str

Optional custom instructions to prepend to system prompt

''
Source code in patchpal/agent/react.py
def __init__(
    self,
    model_id: str = "ollama_chat/llama3.2",
    custom_tools: Optional[List[Callable]] = None,
    enabled_tools: Optional[List[str]] = None,
    litellm_kwargs: Optional[Dict[str, Any]] = None,
    custom_instructions: str = "",
):
    """Initialize ReAct agent.

    Args:
        model_id: LiteLLM model identifier
        custom_tools: Optional list of custom Python functions to add as tools
        enabled_tools: Optional list of tool names to enable (whitelist)
        litellm_kwargs: Optional dict of extra parameters for litellm.completion()
        custom_instructions: Optional custom instructions to prepend to system prompt
    """
    self.model_id = model_id
    self.litellm_kwargs = litellm_kwargs or {}
    self.messages = []

    # Cumulative token/cost accounting across every LLM call this agent makes.
    self.total_llm_calls = 0
    self.cumulative_input_tokens = 0
    self.cumulative_output_tokens = 0
    self.cumulative_cost = 0.0
    self.last_message_cost = 0.0

    # Cache-related token counters (Anthropic/Bedrock prompt caching).
    self.cumulative_cache_creation_tokens = 0
    self.cumulative_cache_read_tokens = 0

    # OpenAI cached-token counter (prompt_tokens_details.cached_tokens).
    self.cumulative_openai_cached_tokens = 0

    # Start from the full built-in tool set (including optional ones like grep and find).
    from patchpal.tools.definitions import TOOL_FUNCTIONS as ALL_TOOL_FUNCTIONS
    from patchpal.tools.definitions import TOOLS as ALL_TOOLS

    all_schemas = list(ALL_TOOLS)
    all_funcs = dict(ALL_TOOL_FUNCTIONS)

    def keep_only(names):
        # Restrict both the schema list and the name->callable map to `names`.
        schemas = [t for t in all_schemas if t["function"]["name"] in names]
        funcs = {k: v for k, v in all_funcs.items() if k in names}
        return schemas, funcs

    # Whitelist resolution: explicit parameter > environment variable > curated default.
    if enabled_tools is not None:
        # Caller specified tools via parameter - use exactly what they asked for.
        tools_list, tool_functions = keep_only(enabled_tools)
    else:
        env_enabled = os.getenv("PATCHPAL_ENABLED_TOOLS")
        if env_enabled:
            # Environment variable specified - a comma-separated list of tool names.
            tools_list, tool_functions = keep_only(
                [t.strip() for t in env_enabled.split(",")]
            )
        else:
            # No parameter, no env var - use the ReAct default curated set.
            tools_list, tool_functions = keep_only(
                [
                    "read_file",
                    "read_lines",
                    "write_file",
                    "edit_file",
                    "web_search",
                    "web_fetch",
                    "grep",
                    "find",
                    "run_shell",
                ]
            )

    # Custom tools are always appended, regardless of any whitelist.
    if custom_tools:
        from patchpal.tools.tool_schema import function_to_tool_schema

        for func in custom_tools:
            tools_list.append(function_to_tool_schema(func))
            tool_functions[func.__name__] = func

    self.tools = tools_list
    self.tool_functions = tool_functions

    # System prompt that teaches the model the ReAct text protocol.
    self.system_prompt = self._build_system_prompt(custom_instructions)

    # Context manager for token estimation
    self.context_manager = ContextManager(model_id=model_id, system_prompt=self.system_prompt)

    # Check if auto-compaction is enabled (default: True)
    self.enable_auto_compact = not config.DISABLE_AUTOCOMPACT

    # Track last compaction to prevent compaction loops
    self._last_compaction_message_count = 0

    # Initialize image handler for vision model support
    from patchpal.tools.image_handler import ImageHandler

    self.image_handler = ImageHandler(self.model_id)

    # Load project memory
    self._load_project_memory()
run(user_message, max_iterations=100)

Run the agent on a user message.

Parameters:

Name Type Description Default
user_message str

The user's question/request

required
max_iterations int

Maximum number of iterations before giving up

100

Returns:

Type Description
str

The agent's final answer

Source code in patchpal/agent/react.py
def run(self, user_message: str, max_iterations: int = 100) -> str:
    """Run the agent on a user message.

    Drives the ReAct loop: call the LLM, parse an "Action:" /
    "Action Input:" pair out of the free-text reply, execute the matching
    tool, feed the result back as an "Observation:" user message, and
    repeat until the model emits a "Final Answer:" (or stops emitting
    actions, which is treated as a final answer).

    Args:
        user_message: The user's question/request
        max_iterations: Maximum number of iterations before giving up

    Returns:
        The agent's final answer
    """
    # Add system prompt as first message if this is the start
    if not self.messages or self.messages[0]["role"] != "system":
        self.messages.insert(0, {"role": "system", "content": self.system_prompt})

    # Add user message
    self.messages.append({"role": "user", "content": user_message})

    # Pattern to match actions: "Action: tool_name" followed by "Action Input: {...}"
    # The lazy (.+?) stops at the first blank line or end of text.
    action_pattern = re.compile(
        r"Action:\s*(\w+)\s*\nAction Input:\s*(.+?)(?=\n\n|$)", re.MULTILINE | re.DOTALL
    )

    # Pattern for final answer
    final_answer_pattern = re.compile(r"Final Answer:\s*(.+)", re.DOTALL | re.IGNORECASE)

    # Track recent actions to detect loops
    recent_actions = []
    max_recent = 8

    iteration = 0
    while iteration < max_iterations:
        iteration += 1

        # Make LLM call
        enable_streaming = config.STREAM_OUTPUT

        def make_completion_call(stream: bool = False):
            return litellm.completion(
                model=self.model_id,
                messages=self.messages,
                stream=stream,
                timeout=LLM_TIMEOUT,
                **self.litellm_kwargs,
            )

        try:
            if enable_streaming:
                response = stream_completion(make_completion_call, show_progress=True)
            else:
                response = make_completion_call(stream=False)

            # Track token usage
            self.total_llm_calls += 1
            if hasattr(response, "usage") and response.usage:
                if hasattr(response.usage, "prompt_tokens"):
                    self.cumulative_input_tokens += response.usage.prompt_tokens
                if hasattr(response.usage, "completion_tokens"):
                    self.cumulative_output_tokens += response.usage.completion_tokens

            # Track cost
            cost = self._calculate_cost(response)
            self.cumulative_cost += cost

        except Exception as e:
            # Broad catch is deliberate: any provider/transport failure ends
            # the run with a readable error string instead of a traceback.
            return f"Error calling model: {e}"

        # Get assistant's response
        result = response.choices[0].message.content or ""

        # Add to messages
        self.messages.append({"role": "assistant", "content": result})

        # Look for actions FIRST (before checking for Final Answer)
        # This ensures we execute tools even if the model hallucinates an answer
        actions = action_pattern.findall(result)

        if not actions:
            # No action found - check if there's a final answer instead
            final_answer_match = final_answer_pattern.search(result)
            if final_answer_match:
                answer = final_answer_match.group(1).strip()
                # Print the thinking part (dimmed)
                thinking_part = result[: final_answer_match.start()].strip()
                if thinking_part:
                    print()
                    print(f"\033[2m{thinking_part}\033[0m")
                    print()
                return answer

            # No action and no final answer - treat the response as final answer
            # Don't loop asking for format corrections
            print()
            print(f"\033[2m{result}\033[0m")
            print()
            return result

        # Process the first action (only one action per turn)
        tool_name, tool_args_str = actions[0]

        # Print the thinking part (dimmed)
        action_index = result.find("Action:")
        thinking_part = result[:action_index].strip()
        if thinking_part:
            print()
            print(f"\033[2m{thinking_part}\033[0m")
            print()

        # Warn if model also provided a Final Answer (hallucination/confusion)
        # We'll execute the action anyway since the model explicitly requested it
        if final_answer_pattern.search(result):
            print(
                "\033[2m⚠️  Agent provided both action and final answer - executing action first\033[0m"
            )

        # Check for action loops (same action repeated)
        # Signature uses only the first 50 chars of the args to tolerate
        # trivial whitespace differences between repeats.
        action_signature = f"{tool_name}:{tool_args_str[:50]}"
        recent_actions.append(action_signature)
        if len(recent_actions) > max_recent:
            recent_actions.pop(0)

        # Detect if we're looping (same action repeated max_recent times consecutively)
        if len(recent_actions) >= max_recent and len(set(recent_actions[-max_recent:])) == 1:
            error_msg = f"Loop detected: {tool_name} called {max_recent}+ times with same arguments. Try a different approach or provide an Answer."
            print(f"\033[1;33m⚠️  {error_msg}\033[0m")
            self.messages.append({"role": "user", "content": f"Observation: {error_msg}"})
            continue

        # Parse tool arguments
        tool_args = {}
        tool_args_str = tool_args_str.strip()

        # Try to extract key-value pairs using regex (more forgiving than JSON)
        # Matches various formats: "key": "value", 'key': 'value', key: value, "key": value
        # NOTE(review): this is lossy by design - values are cut at the first
        # comma/quote/brace, so nested JSON or values containing commas are
        # truncated; all parsed values arrive as strings (coerced below).
        param_pattern = re.compile(r'["\']?(\w+)["\']?\s*:\s*["\']?([^,"\'}\]]+)["\']?')
        matches = param_pattern.findall(tool_args_str)

        if matches:
            # Found key-value pairs - clean up values and remove surrounding quotes
            for key, value in matches:
                value = value.strip()
                # Remove surrounding quotes if present
                if (value.startswith('"') and value.endswith('"')) or (
                    value.startswith("'") and value.endswith("'")
                ):
                    value = value[1:-1]
                tool_args[key] = value
        elif tool_args_str and not tool_args_str.startswith("{"):
            # No key-value pairs, treat as a simple string argument
            tool_args = {"query": tool_args_str}
        # else: empty args

        # Get the tool function
        tool_func = self.tool_functions.get(tool_name)
        if tool_func is None:
            error_msg = f"Error: Unknown tool '{tool_name}'"
            print(f"\033[1;31m✗ {error_msg}\033[0m")
            self.messages.append({"role": "user", "content": f"Observation: {error_msg}"})
            continue

        # Show tool call
        print(f"\033[2m🔧 {tool_name}({tool_args})\033[0m", flush=True)

        # Execute the tool
        try:
            # Filter args to only include valid parameters
            sig = inspect.signature(tool_func)
            valid_params = set(sig.parameters.keys())

            # Check for **kwargs
            has_var_keyword = any(
                p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
            )

            if has_var_keyword:
                filtered_args = tool_args
            else:
                filtered_args = {k: v for k, v in tool_args.items() if k in valid_params}

            # Type coercion
            # (regex parsing above yields strings only; convert to the
            # annotated int/bool where the tool's signature asks for them)
            for param_name, param in sig.parameters.items():
                if param_name in filtered_args:
                    expected_type = param.annotation
                    actual_value = filtered_args[param_name]

                    if expected_type is int and isinstance(actual_value, str):
                        filtered_args[param_name] = int(actual_value)
                    elif expected_type is bool and isinstance(actual_value, str):
                        filtered_args[param_name] = actual_value.lower() in ("true", "1", "yes")

            tool_result = tool_func(**filtered_args)

        except Exception as e:
            # Tool failures become observations so the model can recover.
            tool_result = f"Error executing {tool_name}: {e}"
            print(f"\033[1;31m✗ {tool_name}: {e}\033[0m")

        # Check if operation was cancelled
        if str(tool_result).strip() == "Operation cancelled by user.":
            return "Operation cancelled by user."

        # Apply output limits
        result_str = str(tool_result)
        lines = result_str.split("\n")
        total_lines = len(lines)

        if (
            total_lines > config.MAX_TOOL_OUTPUT_LINES
            or len(result_str) > config.MAX_TOOL_OUTPUT_CHARS
        ):
            truncated_lines = lines[: config.MAX_TOOL_OUTPUT_LINES]
            truncated_str = "\n".join(truncated_lines)

            if len(truncated_str) > config.MAX_TOOL_OUTPUT_CHARS:
                truncated_str = truncated_str[: config.MAX_TOOL_OUTPUT_CHARS]

            truncation_note = f"\n\n... output truncated ({total_lines:,} total lines) ..."
            result_str = truncated_str + truncation_note

        # Add observation to messages
        # (role "user" because the ReAct protocol feeds observations back
        # as if the user reported them)
        observation = f"Observation: {result_str}"
        self.messages.append({"role": "user", "content": observation})

        # Show observation preview
        preview = result_str[:100].replace("\n", " ")
        if len(result_str) > 100:
            preview += "..."
        print(f"\033[2m   → {preview}\033[0m")

    # Max iterations reached
    # NOTE(review): if max_iterations <= 0 the loop never runs and `result`
    # is referenced here before assignment - confirm callers always pass >= 1.
    return (
        f"Maximum iterations ({max_iterations}) reached. Task may be incomplete.\n\n"
        f"Last response: {result}"
    )

Usage Example

from patchpal.agent import create_agent

# Create agent with default model
agent = create_agent()

# Or specify a model
agent = create_agent(model_id="anthropic/claude-sonnet-4-5")

# Run a task
response = agent.run("List all Python files")
print(response)

# Check token usage
print(f"Total tokens: {agent.cumulative_input_tokens + agent.cumulative_output_tokens:,}")