Skip to content

Built-in Tools API

PatchPal includes a comprehensive set of built-in tools for file operations, git, web access, and more.

File Reading

read_file

patchpal.tools.file_reading.read_file(path)

Read the contents of a file.

Supports text files, images, and documents (PDF, DOCX, PPTX) with automatic processing.

Parameters:

Name Type Description Default
path str

Path to the file (relative to repository root or absolute)

required

Returns:

Type Description
str

The file contents as a string (text extracted from documents, base64 for images)

Raises:

Type Description
ValueError

If file is too large, unsupported binary format, or sensitive

Source code in patchpal/tools/file_reading.py
@require_permission_for_read(
    "read_file", get_description=lambda path: f"   Read: {path}", get_pattern=lambda path: path
)
def read_file(path: str) -> str:
    """
    Read the contents of a file.

    Supports text files, images, and documents (PDF, DOCX, PPTX) with automatic processing.

    Args:
        path: Path to the file (relative to repository root or absolute)

    Returns:
        The file contents as a string (text extracted from documents, base64 for images)

    Raises:
        ValueError: If file is too large, unsupported binary format, or sensitive
    """
    _operation_limiter.check_limit(f"read_file({path})")

    p = _check_path(path)

    # Get file size and MIME type
    size = p.stat().st_size
    mime_type, _ = mimetypes.guess_type(str(p))
    ext = p.suffix.lower()

    # Image formats - return as base64 data URL for vision models
    image_extensions = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".webp", ".svg", ".ico"}
    if ext in image_extensions or (mime_type and mime_type.startswith("image/")):
        # For SVG, return as text since it's XML-based
        if ext == ".svg" or mime_type == "image/svg+xml":
            # SVG is text, so apply normal size limit
            if size > config.MAX_FILE_SIZE:
                raise ValueError(
                    f"SVG file too large: {size:,} bytes (max {config.MAX_FILE_SIZE:,} bytes)\n"
                    f"Set PATCHPAL_MAX_FILE_SIZE env var to increase"
                )
            with open(p, "r", encoding="utf-8", errors="surrogateescape", newline=None) as f:
                content = f.read()
            audit_logger.info(f"READ: {path} ({size} bytes, SVG as text)")
            return content

        # For raster images, allow larger files (up to 10MB) since they're for vision models
        # Vision APIs have their own limits and will resize as needed
        # Images are formatted as multimodal content by the agent, bypassing tool output truncation
        max_image_size = config.MAX_IMAGE_SIZE
        if size > max_image_size:
            raise ValueError(
                f"Image file too large: {size:,} bytes (max {max_image_size:,} bytes)\n"
                f"Set PATCHPAL_MAX_IMAGE_SIZE env var to increase\n"
                f"Note: Most vision APIs resize images automatically, so smaller images are recommended"
            )

        # Encode as base64 (local import keeps module import time unchanged)
        import base64

        try:
            content_bytes = p.read_bytes()
            b64_data = base64.b64encode(content_bytes).decode("utf-8")
        except Exception as e:
            # Chain the original exception so the root cause stays visible
            raise ValueError(
                f"Failed to read or encode image file '{path}': {e}\n"
                f"The file may be corrupted or inaccessible."
            ) from e

        # Determine MIME type: trust the guessed type if available, otherwise
        # map the extension; PNG is the fallback for unknown raster formats
        fallback_mimes = {
            ".jpg": "image/jpeg",
            ".jpeg": "image/jpeg",
            ".png": "image/png",
            ".gif": "image/gif",
            ".bmp": "image/bmp",
            ".webp": "image/webp",
            ".ico": "image/x-icon",
        }
        image_mime = mime_type or fallback_mimes.get(ext, "image/png")

        audit_logger.info(f"READ: {path} ({size} bytes, IMAGE {image_mime})")

        # Return IMAGE_DATA format that agent will convert to multimodal content
        # This bypasses tool output truncation limits (PATCHPAL_MAX_TOOL_OUTPUT_CHARS)
        return f"IMAGE_DATA:{image_mime}:{b64_data}"

    # For document formats (PDF/DOCX/PPTX), extract text first, then check extracted size
    # This allows large binary documents as long as the extracted text fits in context
    # Check both MIME type and extension (Windows doesn't always recognize Office formats)
    if (mime_type and "pdf" in mime_type) or ext == ".pdf":
        # Extract text from PDF (no size check on binary - check extracted text instead)
        content_bytes = p.read_bytes()
        text_content = extract_text_from_pdf(content_bytes, source=str(path))
        audit_logger.info(
            f"READ: {path} ({size} bytes binary, {len(text_content)} chars text, PDF)"
        )
        return text_content
    elif (mime_type and ("wordprocessingml" in mime_type or "msword" in mime_type)) or ext in (
        ".docx",
        ".doc",
    ):
        # Extract text from DOCX/DOC
        content_bytes = p.read_bytes()
        text_content = extract_text_from_docx(content_bytes, source=str(path))
        audit_logger.info(
            f"READ: {path} ({size} bytes binary, {len(text_content)} chars text, DOCX)"
        )
        return text_content
    elif (mime_type and ("presentationml" in mime_type or "ms-powerpoint" in mime_type)) or ext in (
        ".pptx",
        ".ppt",
    ):
        # Extract text from PPTX/PPT
        content_bytes = p.read_bytes()
        text_content = extract_text_from_pptx(content_bytes, source=str(path))
        audit_logger.info(
            f"READ: {path} ({size} bytes binary, {len(text_content)} chars text, PPTX)"
        )
        return text_content

    # For non-document files, check size before reading
    if size > config.MAX_FILE_SIZE:
        raise ValueError(
            f"File too large: {size:,} bytes (max {config.MAX_FILE_SIZE:,} bytes)\n"
            f"Set PATCHPAL_MAX_FILE_SIZE env var to increase"
        )

    # Check if binary (for non-document files)
    if _is_binary_file(p):
        raise ValueError(
            f"Cannot read binary file: {path}\nType: {mime_type or 'unknown'}\n"
            f"Supported document formats: PDF, DOCX, PPTX"
        )

    # Read as text file
    with open(p, "r", encoding="utf-8", errors="surrogateescape", newline=None) as f:
        content = f.read()
    audit_logger.info(f"READ: {path} ({size} bytes)")
    return content

read_lines

patchpal.tools.file_reading.read_lines(path, start_line, end_line=None)

Read specific lines from a file.

Parameters:

Name Type Description Default
path str

Path to the file (relative to repository root or absolute)

required
start_line int

Starting line number (1-indexed)

required
end_line Optional[int]

Ending line number (inclusive, 1-indexed). If omitted, reads only start_line

None

Returns:

Type Description
str

The requested lines with line numbers

Raises:

Type Description
ValueError

If file not found, binary, sensitive, or line numbers invalid

Examples:

read_lines("src/auth.py", 45, 60)  # Read lines 45-60
read_lines("src/auth.py", 45)      # Read only line 45

Tip

Use wc -l filename shell command to find total line count for reading from end

Source code in patchpal/tools/file_reading.py
@require_permission_for_read(
    "read_lines",
    get_description=lambda path,
    start_line,
    end_line=None: f"   Read lines {start_line}-{end_line or start_line}: {path}",
    get_pattern=lambda path, start_line, end_line=None: path,
)
def read_lines(path: str, start_line: int, end_line: Optional[int] = None) -> str:
    """
    Read specific lines from a file.

    Args:
        path: Path to the file (relative to repository root or absolute)
        start_line: Starting line number (1-indexed)
        end_line: Ending line number (inclusive, 1-indexed). If omitted, reads only start_line

    Returns:
        The requested lines with line numbers

    Raises:
        ValueError: If file not found, binary, sensitive, or line numbers invalid

    Examples:
        read_lines("src/auth.py", 45, 60)  # Read lines 45-60
        read_lines("src/auth.py", 45)       # Read only line 45

    Tip:
        Use `wc -l filename` shell command to find total line count for reading from end
    """
    _operation_limiter.check_limit(f"read_lines({path}, {start_line}-{end_line or start_line})")

    # Reject impossible ranges before touching the filesystem
    if start_line < 1:
        raise ValueError(f"start_line must be >= 1, got {start_line}")
    if end_line is None:
        # Single-line read: the range collapses to start_line
        end_line = start_line
    elif end_line < start_line:
        raise ValueError(f"end_line ({end_line}) must be >= start_line ({start_line})")

    p = _check_path(path)

    # Binary files cannot be rendered as numbered text lines
    if _is_binary_file(p):
        raise ValueError(
            f"Cannot read binary file: {path}\nType: {mimetypes.guess_type(str(p))[0] or 'unknown'}"
        )

    # Load the whole file; any I/O problem is surfaced as a ValueError
    try:
        with open(p, "r", encoding="utf-8", errors="surrogateescape", newline=None) as fh:
            all_lines = fh.readlines()
    except Exception as e:
        raise ValueError(f"Failed to read file: {e}")

    total_lines = len(all_lines)
    if start_line > total_lines:
        raise ValueError(f"start_line {start_line} exceeds file length ({total_lines} lines)")

    # Clamp the end of the range to the actual file length
    actual_end_line = min(end_line, total_lines)

    # Slice out the requested window (1-indexed -> 0-indexed)
    selection = all_lines[start_line - 1 : actual_end_line]

    # Render each line as "<number>  <text>", stripping trailing whitespace
    numbered = [
        f"{num:4d}  {text.rstrip()}" for num, text in enumerate(selection, start=start_line)
    ]
    output = "\n".join(numbered)

    # Tell the caller when the range had to be clamped
    if actual_end_line < end_line:
        output += (
            f"\n\n(Note: Requested lines up to {end_line}, but file only has {total_lines} lines)"
        )

    audit_logger.info(
        f"READ_LINES: {path} lines {start_line}-{actual_end_line} ({len(selection)} lines)"
    )
    return output

File Writing

write_file

patchpal.tools.file_writing.write_file(path, content)

Write complete file contents from scratch.

Overwrites existing files entirely or creates new ones. Use edit_file for small targeted changes (1-20 lines).

Parameters:

Name Type Description Default
path str

Relative path to the file from the repository root

required
content str

Complete file content (entire file, not just changes)

required

Returns:

Type Description
str

A confirmation message with the unified diff

Raises:

Type Description
ValueError

If in read-only mode or file is too large

Source code in patchpal/tools/file_writing.py
def write_file(path: str, content: str) -> str:
    """
    Write complete file contents from scratch.

    Overwrites existing files entirely or creates new ones. Use edit_file for
    small targeted changes (1-20 lines).

    Args:
        path: Relative path to the file from the repository root
        content: Complete file content (entire file, not just changes)

    Returns:
        A confirmation message with the unified diff

    Raises:
        ValueError: If in read-only mode or file is too large
    """
    _operation_limiter.check_limit(f"write_file({path})")

    if config.READ_ONLY:
        raise ValueError(
            "Cannot modify files in read-only mode\n"
            "Set PATCHPAL_READ_ONLY=false to allow modifications"
        )

    p = _check_path(path, must_exist=False)

    # Check size of new content
    new_size = len(content.encode("utf-8"))
    if new_size > config.MAX_FILE_SIZE:
        raise ValueError(
            f"New content too large: {new_size:,} bytes (max {config.MAX_FILE_SIZE:,} bytes)"
        )

    # Read old content if file exists (needed for diff in permission prompt)
    old_content = ""
    if p.exists():
        with open(p, "r", encoding="utf-8", errors="surrogateescape", newline=None) as f:
            old_content = f.read()
        old = old_content.splitlines(keepends=True)
    else:
        old = []

    # Check permission with colored diff
    permission_manager = _get_permission_manager()
    operation = "Create" if not p.exists() else "Update"
    diff_display = _format_colored_diff(old_content, content, file_path=path)

    # Get permission pattern (directory for outside repo, relative path for inside)
    permission_pattern = _get_permission_pattern_for_path(path, p)

    # Add warning if writing outside repository (unless it's PatchPal's managed files)
    outside_repo_warning = _get_outside_repo_warning(p)

    description = f"   ● {operation}({path}){outside_repo_warning}\n{diff_display}"

    if not permission_manager.request_permission(
        "write_file", description, pattern=permission_pattern
    ):
        return "Operation cancelled by user."

    # Check git status for uncommitted changes (only for files inside repo)
    git_status = _check_git_status()
    git_warning = ""
    if _is_inside_repo(p) and git_status.get("is_repo") and git_status.get("has_uncommitted"):
        relative_path = str(p.relative_to(common.REPO_ROOT))
        if any(relative_path in change for change in git_status.get("changes", [])):
            git_warning = "\n⚠️  Note: File has uncommitted changes in git\n"

    # Backup existing file
    backup_path = None
    if p.exists():
        backup_path = _backup_file(p)

    new = content.splitlines(keepends=True)

    # Generate diff
    diff = difflib.unified_diff(
        old,
        new,
        fromfile=f"{path} (before)",
        tofile=f"{path} (after)",
    )
    diff_str = "".join(diff)

    # Check if critical file
    warning = ""
    if _is_critical_file(p):
        warning = "\n⚠️  WARNING: Modifying critical infrastructure file!\n"

    # Write the new content
    p.parent.mkdir(parents=True, exist_ok=True)
    with open(p, "w", encoding="utf-8", errors="surrogateescape", newline="\n") as f:
        f.write(content)

    # Audit log
    audit_logger.info(
        f"WRITE: {path} ({new_size} bytes)" + (f" [BACKUP: {backup_path}]" if backup_path else "")
    )

    backup_msg = f"\n[Backup saved: {backup_path}]" if backup_path else ""

    return f"Successfully updated {path}{warning}{git_warning}{backup_msg}\n\nDiff:\n{diff_str}"

edit_file

patchpal.tools.file_writing.edit_file(path, old_string, new_string)

Edit a file by replacing a string match with flexible whitespace handling.

Uses multiple matching strategies to find old_string:

1. Exact match
2. Trimmed line match (ignores indentation differences in search)
3. Normalized whitespace match (ignores spacing differences in search)

Important: The flexible matching only applies to FINDING old_string. The new_string is used exactly as provided, so it should include proper indentation/formatting to match the surrounding code.

Parameters:

Name Type Description Default
path str

Relative path to the file from the repository root

required
old_string str

The string to find (whitespace can be approximate)

required
new_string str

The replacement string (use exact whitespace/indentation you want)

required

Returns:

Type Description
str

Confirmation message with the changes made

Raises:

Type Description
ValueError

If file not found, old_string not found, or multiple matches

Example
Find with flexible matching, but provide new_string with proper indent

edit_file("test.py", "print('hello')", " print('world')") # 4 spaces

Source code in patchpal/tools/file_writing.py
def edit_file(path: str, old_string: str, new_string: str) -> str:
    """
    Edit a file by replacing a string match with flexible whitespace handling.

    Uses multiple matching strategies to find old_string:
    1. Exact match
    2. Trimmed line match (ignores indentation differences in search)
    3. Normalized whitespace match (ignores spacing differences in search)

    Important: The flexible matching only applies to FINDING old_string.
    The new_string is used exactly as provided, so it should include proper
    indentation/formatting to match the surrounding code.

    Args:
        path: Relative path to the file from the repository root
        old_string: The string to find (whitespace can be approximate)
        new_string: The replacement string (use exact whitespace/indentation you want)

    Returns:
        Confirmation message with the changes made

    Raises:
        ValueError: If file not found, old_string not found, or multiple matches

    Example:
        # Find with flexible matching, but provide new_string with proper indent
        edit_file("test.py", "print('hello')", "    print('world')")  # 4 spaces
    """
    # path is truncated to 30 chars so the limiter key stays short
    _operation_limiter.check_limit(f"edit_file({path[:30]}...)")

    if config.READ_ONLY:
        raise ValueError(
            "Cannot edit files in read-only mode\n"
            "Set PATCHPAL_READ_ONLY=false to allow modifications"
        )

    p = _check_path(path, must_exist=True)

    # Read current content
    try:
        with open(p, "r", encoding="utf-8", errors="surrogateescape", newline=None) as f:
            content = f.read()
    except Exception as e:
        raise ValueError(f"Failed to read file: {e}")

    # Try to find a match using multiple strategies; the helper returns the
    # exact text span from `content` that matched (or a falsy value on miss)
    matched_string = _find_match_with_strategies(content, old_string)

    if not matched_string:
        # No match found with any strategy
        raise ValueError(
            f"String not found in {path}.\n\n"
            f"Searched for:\n{old_string[:200]}\n\n"
            f"💡 Tip: Use read_lines() to see exact content."
        )

    # Count occurrences of the matched string
    count = content.count(matched_string)
    if count > 1:
        # Show WHERE the matches are
        positions = []
        start = 0
        while True:
            pos = content.find(matched_string, start)
            if pos == -1:
                break
            # 1-indexed line number of this occurrence
            line_num = content[:pos].count("\n") + 1
            positions.append(line_num)
            # NOTE(review): advancing by 1 reports overlapping matches, while
            # count() above is non-overlapping — the two can disagree for
            # self-overlapping strings; confirm that this is acceptable
            start = pos + 1

        raise ValueError(
            f"String appears {count} times in {path} at lines: {positions}\n"
            f"Add more context (3-5 surrounding lines) to make it unique.\n\n"
            f"💡 Tip: Use read_lines() to see the exact context."
        )

    # Perform indentation adjustment and trailing newline preservation BEFORE showing diff
    # Important: Adjust indentation and preserve trailing newlines to maintain file structure
    adjusted_new_string = new_string

    # Step 1: Adjust indentation if needed
    # Get the indentation of the first line in matched_string vs new_string
    matched_lines = matched_string.split("\n")
    new_lines = new_string.split("\n")

    # Only adjust when both first lines are non-empty (an empty first line
    # carries no indentation information)
    if matched_lines and new_lines and matched_lines[0] and new_lines[0]:
        # Get leading whitespace of first line in matched string
        matched_indent = len(matched_lines[0]) - len(matched_lines[0].lstrip())
        new_indent = len(new_lines[0]) - len(new_lines[0].lstrip())

        if matched_indent != new_indent:
            # Need to adjust indentation
            indent_diff = matched_indent - new_indent

            # Apply the indentation adjustment to all non-empty lines in new_string
            adjusted_lines = []
            for line in new_lines:
                if line.strip():  # Non-empty line
                    if indent_diff > 0:
                        # Need to add spaces
                        adjusted_lines.append((" " * indent_diff) + line)
                    else:
                        # Need to remove spaces (if possible)
                        spaces_to_remove = abs(indent_diff)
                        if line[:spaces_to_remove].strip() == "":  # All spaces
                            adjusted_lines.append(line[spaces_to_remove:])
                        else:
                            # Can't remove that many spaces, keep as-is
                            adjusted_lines.append(line)
                else:
                    # Empty line, keep as-is
                    adjusted_lines.append(line)

            adjusted_new_string = "\n".join(adjusted_lines)

    # Step 2: Preserve trailing newlines from matched_string
    if matched_string.endswith("\n") and not adjusted_new_string.endswith("\n"):
        # Matched block had trailing newline(s), preserve them
        # Count consecutive trailing newlines in matched_string
        trailing_newlines = len(matched_string) - len(matched_string.rstrip("\n"))
        adjusted_new_string = adjusted_new_string + ("\n" * trailing_newlines)

    # Check permission before proceeding (use adjusted_new_string for accurate diff display)
    permission_manager = _get_permission_manager()

    # Format colored diff for permission prompt (use adjusted_new_string so user sees what will actually be written)
    diff_display = _format_colored_diff(matched_string, adjusted_new_string, file_path=path)

    # Get permission pattern (directory for outside repo, relative path for inside)
    permission_pattern = _get_permission_pattern_for_path(path, p)

    # Add warning if writing outside repository (unless it's PatchPal's managed files)
    outside_repo_warning = _get_outside_repo_warning(p)

    description = f"   ● Update({path}){outside_repo_warning}\n{diff_display}"

    if not permission_manager.request_permission(
        "edit_file", description, pattern=permission_pattern
    ):
        return "Operation cancelled by user."

    # Backup if enabled
    backup_path = _backup_file(p)

    # Safe to replace globally: count == 1 was verified above
    new_content = content.replace(matched_string, adjusted_new_string)

    # Write the new content; newline="\n" normalizes output to LF line endings
    with open(p, "w", encoding="utf-8", errors="surrogateescape", newline="\n") as f:
        f.write(new_content)

    # Generate diff for the specific change (use adjusted_new_string for accurate diff)
    old_lines = matched_string.split("\n")
    new_lines = adjusted_new_string.split("\n")
    diff = difflib.unified_diff(old_lines, new_lines, fromfile="old", tofile="new", lineterm="")
    diff_str = "\n".join(diff)

    audit_logger.info(f"EDIT: {path} ({len(matched_string)} -> {len(adjusted_new_string)} chars)")

    backup_msg = f"\n[Backup saved: {backup_path}]" if backup_path else ""
    return f"Successfully edited {path}{backup_msg}\n\nChange:\n{diff_str}"

Code Analysis

code_structure

patchpal.tools.code_analysis.code_structure(path, max_symbols=50)

Analyze code structure using tree-sitter AST parsing.

Returns a compact view of: - File statistics (lines, size) - Functions with signatures and line numbers - Classes with methods - Module/file docstring (if present)

This is much more efficient than read_file for understanding code layout. Supports 40+ languages via tree-sitter.

Parameters:

Name Type Description Default
path str

File path to analyze (relative or absolute)

required
max_symbols int

Maximum number of symbols to show (default: 50)

50

Returns:

Type Description
str

Formatted code structure overview

Examples:

>>> code_structure("patchpal/tools.py")
File: patchpal/tools.py (2883 lines, 89.2 KB)

Functions (45):
  Line 123: def read_file(path: str, *, encoding: str = "utf-8") -> str
  Line 234: def write_file(path: str, content: str) -> str
  ...

Use read_lines('patchpal/tools.py', start, end) to read specific sections.

Source code in patchpal/tools/code_analysis.py
def code_structure(path: str, max_symbols: int = 50) -> str:
    """
    Analyze code structure using tree-sitter AST parsing.

    Produces a compact overview of the file: statistics (lines, size),
    functions with signatures and line numbers, classes with their methods,
    and the module/file docstring when one is present.

    Far cheaper than read_file for understanding code layout; 40+ languages
    are supported via tree-sitter.

    Args:
        path: File path to analyze (relative or absolute)
        max_symbols: Maximum number of symbols to show (default: 50)

    Returns:
        Formatted code structure overview
    """
    _operation_limiter.check_limit(f"code_structure({path})")

    # Without tree-sitter nothing can be parsed — tell the caller how to fix it
    if not TREE_SITTER_AVAILABLE:
        return (
            "❌ Tree-sitter not available. Install with: pip install tree-sitter-language-pack\n\n"
            "Fallback: Use read_lines() to read specific sections of the file."
        )

    # Validate and resolve path
    resolved_path = _check_path(path, must_exist=True)

    # Map the file extension (without the leading dot) to a tree-sitter language
    language_name = LANGUAGE_MAP.get(resolved_path.suffix.lstrip("."))
    if not language_name:
        # Unsupported language — fall back to plain file statistics
        return _basic_file_info(resolved_path, path)

    try:
        parser = get_parser(language_name)

        # Parse the raw bytes of the file
        source = resolved_path.read_bytes()
        tree = parser.parse(source)

        # Walk the AST and collect function/class symbols
        symbols = _extract_symbols(tree.root_node, language_name, source)
        result = _format_output(resolved_path, path, symbols, max_symbols, source)

        audit_logger.info(f"CODE_STRUCTURE: {path} ({len(symbols)} symbols)")
        return result

    except Exception as e:
        # Any parsing problem degrades gracefully to basic file info
        audit_logger.warning(f"CODE_STRUCTURE failed for {path}: {e}")
        return _basic_file_info(resolved_path, path) + f"\n\n⚠️  Tree-sitter parsing failed: {e}"

Repository Map

get_repo_map

patchpal.tools.repo_map.get_repo_map(max_files=100, include_patterns=None, exclude_patterns=None, focus_files=None)

Generate a compact repository map showing code structure across all files.

This provides a bird's-eye view of the codebase, showing function and class signatures without their implementations. Much more token-efficient than reading individual files.

Supports 20+ languages including Python, JavaScript, TypeScript, Go, Rust, Java, C/C++, C#, Ruby, PHP, Swift, Kotlin, Scala, Elm, Elixir, and more.

Parameters:

Name Type Description Default
max_files int

Maximum number of files to include (default: 100)

100
include_patterns Optional[List[str]]

Glob patterns to include (e.g., ['*.py', '*.js'])

None
exclude_patterns Optional[List[str]]

Glob patterns to exclude (e.g., ['*test*', '*_pb2.py'])

None
focus_files Optional[List[str]]

Files mentioned in conversation (prioritized in output)

None

Returns:

Type Description
str

Formatted repository map with file structures

Examples:

>>> get_repo_map(max_files=50)
Repository Map (50 files):

src/auth.py:
  Line 45: def login(username: str, password: str) -> bool
  Line 67: def logout(session_id: str) -> None
  Line 89: class AuthManager:

src/database.py:
  Line 23: class Database:
  Line 45: def connect(self) -> None
  ...

Token Efficiency
  • Traditional approach: Read 50 files × 2,000 tokens = 100,000 tokens
  • With repo map: 50 files × 150 tokens = 7,500 tokens
  • Savings: 92.5%
Source code in patchpal/tools/repo_map.py
def get_repo_map(
    max_files: int = 100,
    include_patterns: Optional[List[str]] = None,
    exclude_patterns: Optional[List[str]] = None,
    focus_files: Optional[List[str]] = None,
) -> str:
    """Generate a compact repository map showing code structure across all files.

    This provides a bird's-eye view of the codebase, showing function and class
    signatures without their implementations. Much more token-efficient than
    reading individual files.

    Supports 20+ languages including Python, JavaScript, TypeScript, Go, Rust,
    Java, C/C++, C#, Ruby, PHP, Swift, Kotlin, Scala, Elm, Elixir, and more.

    Args:
        max_files: Maximum number of files to include (default: 100)
        include_patterns: Glob patterns to include (e.g., ['*.py', '*.js'])
        exclude_patterns: Glob patterns to exclude (e.g., ['*test*', '*_pb2.py'])
        focus_files: Files mentioned in conversation (prioritized in output)

    Returns:
        Formatted repository map with file structures

    Examples:
        >>> get_repo_map(max_files=50)
        Repository Map (50 files):

        src/auth.py:
          Line   45: def login(username: str, password: str) -> bool
          Line   67: def logout(session_id: str) -> None
          Line   89: class AuthManager:

        src/database.py:
          Line   23: class Database:
          Line   45:   def connect(self) -> None
          ...

    Token Efficiency:
        - Traditional approach: Read 50 files × 2,000 tokens = 100,000 tokens
        - With repo map: 50 files × 150 tokens = 7,500 tokens
        - Savings: 92.5%
    """
    _operation_limiter.check_limit(f"get_repo_map(max_files={max_files})")

    audit_logger.info(
        f"REPO_MAP: Generating (max_files={max_files}, "
        f"include={include_patterns}, exclude={exclude_patterns})"
    )

    # Get supported file extensions
    supported_extensions = set(LANGUAGE_MAP.keys())

    # Convert patterns to sets for faster lookup
    focus_set = set(focus_files or [])

    # Collect all code files
    file_structures: Dict[str, str] = {}
    skipped_count = 0

    for path in REPO_ROOT.rglob("*"):
        # Skip directories, hidden files, and non-code files
        if not path.is_file():
            continue
        # Any hidden path component excludes the file — this also skips files
        # inside hidden directories (e.g. .git/, .venv/)
        if any(part.startswith(".") for part in path.parts):
            continue

        # Extension without the leading dot, matching LANGUAGE_MAP keys
        ext = path.suffix.lstrip(".")
        if ext not in supported_extensions:
            continue

        # Get relative path
        try:
            rel_path = path.relative_to(REPO_ROOT)
        except ValueError:
            continue

        # Apply include/exclude patterns
        if include_patterns:
            if not any(rel_path.match(pattern) for pattern in include_patterns):
                skipped_count += 1
                continue
        if exclude_patterns:
            if any(rel_path.match(pattern) for pattern in exclude_patterns):
                skipped_count += 1
                continue

        # Try to get from cache (a hit returns the pre-condensed structure)
        structure = _REPO_MAP_CACHE.get(path)

        if structure is None:
            # Generate structure
            try:
                structure = code_structure(str(rel_path), max_symbols=20)
                # "❌"-prefixed results signal an unusable analysis — drop them
                if structure and not structure.startswith("❌"):
                    # Extract just the essential parts (remove hints and verbose info)
                    lines = structure.split("\n")
                    essential_lines = []
                    for line in lines:
                        # Skip hint lines, empty lines, and file header
                        if line.startswith("💡") or line.startswith("File:"):
                            continue
                        if line.strip():
                            essential_lines.append(line)

                    # Limit to 30 lines per file to keep it compact
                    structure = "\n".join(essential_lines[:30])
                    _REPO_MAP_CACHE.set(path, structure)
                else:
                    structure = None
            except Exception:
                # Analysis failures silently omit the file from the map
                structure = None

        if structure:
            file_structures[str(rel_path)] = structure

    # Mark that we've completed a scan
    _REPO_MAP_CACHE.mark_scanned()

    # Rank files (focus files first, then alphabetically)
    def rank_file(path: str) -> Tuple[int, str]:
        # Priority 0 = focus files, 1 = normal files
        priority = 0 if path in focus_set else 1
        return (priority, path)

    ranked_files = sorted(file_structures.keys(), key=rank_file)

    # Build output (limit to max_files)
    total_files = len(ranked_files)
    showing_files = min(max_files, total_files)

    output_lines = [f"Repository Map ({total_files} files analyzed, showing {showing_files}):\n"]

    if skipped_count > 0:
        output_lines.append(f"(Skipped {skipped_count} files based on include/exclude patterns)\n")

    for file_path in ranked_files[:max_files]:
        structure = file_structures[file_path]
        output_lines.append(f"\n{file_path}:")

        # Show structure (truncate if needed for extremely long files)
        structure_preview = structure[:800]  # ~250 tokens max per file
        if len(structure) > 800:
            structure_preview += "\n  [... more symbols omitted ...]"

        output_lines.append(structure_preview)

    # Add footer with helpful information
    if total_files > max_files:
        output_lines.append(f"\n... and {total_files - max_files} more files not shown")
        output_lines.append(
            "\n💡 Increase max_files parameter or use include_patterns to refine results"
        )

    output_lines.append("\n💡 Use code_structure(path) to see full details for a specific file")
    output_lines.append("💡 Use read_file(path) to see complete implementation")

    result = "\n".join(output_lines)

    # Calculate rough token estimate (1 char ≈ 0.3 tokens for code)
    estimated_tokens = len(result) // 3

    audit_logger.info(
        f"REPO_MAP: Generated {len(result):,} chars (~{estimated_tokens:,} tokens) "
        f"for {total_files} files"
    )

    return result

get_repo_map_stats

patchpal.tools.repo_map.get_repo_map_stats()

Get statistics about the repository map cache.

Returns:

Type Description
Dict[str, any]

Dictionary with cache statistics including:

Dict[str, any]
  • cached_files: Number of files in cache
Dict[str, any]
  • last_scan: Timestamp of last full scan
Dict[str, any]
  • cache_age: Seconds since last scan
Source code in patchpal/tools/repo_map.py
from typing import Any  # NOTE(review): move alongside the module's other typing imports

def get_repo_map_stats() -> Dict[str, Any]:
    """Get statistics about the repository map cache.

    Returns:
        Dictionary with cache statistics including:
        - cached_files: Number of files in cache
        - last_scan: Timestamp (epoch seconds) of last full scan
        - cache_age: Seconds elapsed since last full scan
    """
    # Fix: the annotation previously used the builtin ``any`` (a function,
    # not a type); ``typing.Any`` is the intended annotation.
    return {
        "cached_files": len(_REPO_MAP_CACHE.cache),
        "last_scan": _REPO_MAP_CACHE.last_full_scan,
        "cache_age": time.time() - _REPO_MAP_CACHE.last_full_scan,
    }

clear_repo_map_cache

patchpal.tools.repo_map.clear_repo_map_cache()

Clear the repository map cache.

Useful if files have been added/removed outside of PatchPal's awareness, or if you want to force a fresh scan.

Source code in patchpal/tools/repo_map.py
def clear_repo_map_cache():
    """Reset the repository map cache to a fresh, empty state.

    Call this when files have been added or removed outside of PatchPal's
    awareness, or to force a full rescan on the next repo-map request.
    """
    global _REPO_MAP_CACHE
    fresh_cache = RepoMapCache()
    _REPO_MAP_CACHE = fresh_cache
    audit_logger.info("REPO_MAP: Cache cleared")

Shell Execution

run_shell

patchpal.tools.shell_tools.run_shell(cmd)

Run a safe shell command in the repository.

Parameters:

Name Type Description Default
cmd str

The shell command to execute

required

Returns:

Type Description
str

Combined stdout and stderr output

Raises:

Type Description
ValueError

If command contains forbidden operations

Source code in patchpal/tools/shell_tools.py
def run_shell(cmd: str) -> str:
    """
    Run a safe shell command in the repository.

    The command must first be approved by the permission manager, then is
    screened against known-dangerous tokens and substring patterns before
    being executed with the repository root as the working directory.

    Args:
        cmd: The shell command to execute

    Returns:
        Combined stdout and stderr output (possibly filtered to reduce
        token usage), or a cancellation message if the user declines.

    Raises:
        ValueError: If command contains forbidden operations
    """
    # Check permission before proceeding
    permission_manager = _get_permission_manager()
    description = f"   {cmd}"
    # Extract meaningful command pattern and working directory, handling compound commands
    command_name, working_dir = _extract_shell_command_info(cmd)

    # Create composite pattern: "command@directory" for cd commands, just "command" otherwise
    # Using @ separator for cross-platform compatibility (: would conflict with Windows paths like C:\temp)
    if working_dir and command_name:
        pattern = f"{command_name}@{working_dir}"
    else:
        pattern = command_name

    # Pass working_dir separately for display purposes
    if not permission_manager.request_permission(
        "run_shell", description, pattern=pattern, context=working_dir, full_command=cmd
    ):
        return "Operation cancelled by user."

    _operation_limiter.check_limit(f"run_shell({cmd[:50]}...)")

    # Check for dangerous tokens (privilege escalation commands)
    # Token-based matching: splits command and checks each token
    if any(tok in DANGEROUS_TOKENS for tok in cmd.split()):
        raise ValueError(
            f"Blocked dangerous command: {cmd}\nForbidden operations: {', '.join(DANGEROUS_TOKENS)}"
        )

    # Check for dangerous patterns (destructive operations)
    # Substring matching: checks if pattern appears anywhere in command
    # Fix: use a distinct loop variable so the permission `pattern` computed
    # above is not shadowed (the original reused the name `pattern` here).
    for dangerous_pattern in DANGEROUS_PATTERNS:
        if dangerous_pattern in cmd:
            raise ValueError(
                f"Blocked dangerous pattern in command: {dangerous_pattern}\nFull command: {cmd}"
            )

    audit_logger.info(f"SHELL: {cmd}")

    result = subprocess.run(
        cmd,
        shell=True,
        capture_output=True,
        cwd=common.REPO_ROOT,
        timeout=config.SHELL_TIMEOUT,
    )

    # Decode output with error handling for problematic characters
    # Use utf-8 on all platforms with 'replace' to handle encoding issues
    stdout = result.stdout.decode("utf-8", errors="replace") if result.stdout else ""
    stderr = result.stderr.decode("utf-8", errors="replace") if result.stderr else ""

    output = stdout + stderr

    # Apply output filtering to reduce token usage
    if OutputFilter.should_filter(cmd):
        filtered_output = OutputFilter.filter_output(cmd, output)
        # Log if we filtered significantly (more than half the lines removed)
        original_lines = len(output.split("\n"))
        filtered_lines = len(filtered_output.split("\n"))
        if filtered_lines < original_lines * 0.5:
            audit_logger.info(
                f"SHELL_FILTER: Reduced output from {original_lines} to {filtered_lines} lines "
                f"(~{int((1 - filtered_lines / original_lines) * 100)}% reduction)"
            )
        return filtered_output

    return output

Web Tools

Search the web using DuckDuckGo and return results.

Parameters:

Name Type Description Default
query str

The search query

required
max_results int

Maximum number of results to return (default: 5, max: 10)

5

Returns:

Type Description
str

Formatted search results with titles, URLs, and snippets

Raises:

Type Description
ValueError

If search fails

Source code in patchpal/tools/web_tools.py
def web_search(query: str, max_results: int = 5) -> str:
    """
    Search the web using DuckDuckGo and return results.

    Args:
        query: The search query
        max_results: Maximum number of results to return (default: 5, max: 10)

    Returns:
        Formatted search results with titles, URLs, and snippets

    Raises:
        ValueError: If search fails
    """
    # Check permission before proceeding
    permission_manager = _get_permission_manager()
    description = f"   Search: {query}"
    if not permission_manager.request_permission("web_search", description):
        return "Operation cancelled by user."

    _operation_limiter.check_limit(f"web_search({query[:30]}...)")

    # Limit max_results
    max_results = min(max_results, 10)

    try:
        # Determine SSL verification setting
        # Priority: PATCHPAL_VERIFY_SSL env var > SSL_CERT_FILE > REQUESTS_CA_BUNDLE > default True
        verify_ssl = config.VERIFY_SSL
        if verify_ssl is not None:
            # User explicitly set PATCHPAL_VERIFY_SSL
            if verify_ssl.lower() in ("false", "0", "no"):
                verify = False
            elif verify_ssl.lower() in ("true", "1", "yes"):
                verify = True
            else:
                # Treat as path to CA bundle
                verify = verify_ssl
        else:
            # Use SSL_CERT_FILE or REQUESTS_CA_BUNDLE if set (for corporate environments)
            verify = os.getenv("SSL_CERT_FILE") or os.getenv("REQUESTS_CA_BUNDLE") or True

        # Perform search using DuckDuckGo
        with DDGS(verify=verify) as ddgs:
            results = list(ddgs.text(query, max_results=max_results))

        if not results:
            audit_logger.info(f"WEB_SEARCH: {query} - No results")
            return f"No search results found for: {query}"

        # Format results
        formatted_results = [f"Search results for: {query}\n"]
        for i, result in enumerate(results, 1):
            title = result.get("title", "No title")
            url = result.get("href", "No URL")
            snippet = result.get("body", "No description")

            formatted_results.append(f"\n{i}. {title}\n   URL: {url}\n   {snippet}")

        output = "\n".join(formatted_results)
        audit_logger.info(f"WEB_SEARCH: {query} - Found {len(results)} results")
        return output

    except Exception as e:
        error_msg = str(e)

        # Provide helpful error messages for common issues
        if "CERTIFICATE_VERIFY_FAILED" in error_msg or "TLS handshake failed" in error_msg:
            return (
                "Web search unavailable: SSL certificate verification failed.\n"
                "This may be due to:\n"
                "- Corporate proxy/firewall blocking requests\n"
                "- Network configuration issues\n"
                "- VPN interference\n\n"
                "Consider using web_fetch with a specific URL if you have one."
            )
        elif "RuntimeError" in error_msg or "error sending request" in error_msg:
            return (
                "Web search unavailable: Network connection failed.\n"
                "Please check your internet connection and try again."
            )
        else:
            # Fix: chain the original exception so the traceback keeps the
            # underlying cause instead of reporting "during handling of ...".
            raise ValueError(f"Web search failed: {e}") from e

web_fetch

patchpal.tools.web_tools.web_fetch(url, extract_text=True)

Fetch content from a URL and optionally extract readable text.

Parameters:

Name Type Description Default
url str

The URL to fetch

required
extract_text bool

If True, extract readable text from HTML/PDF (default: True)

True

Returns:

Type Description
str

The fetched content (text extracted from HTML/PDF if extract_text=True)

Raises:

Type Description
ValueError

If request fails or content is too large

Source code in patchpal/tools/web_tools.py
def web_fetch(url: str, extract_text: bool = True) -> str:
    """
    Fetch content from a URL and optionally extract readable text.

    Args:
        url: The URL to fetch
        extract_text: If True, extract readable text from HTML/PDF (default: True)

    Returns:
        The fetched content (text extracted from HTML/PDF if extract_text=True)

    Raises:
        ValueError: If request fails or content is too large
    """
    # Check permission before proceeding
    permission_manager = _get_permission_manager()
    description = f"   Fetch: {url}"
    if not permission_manager.request_permission("web_fetch", description):
        return "Operation cancelled by user."

    _operation_limiter.check_limit(f"web_fetch({url[:50]}...)")

    # Validate URL format
    if not url.startswith(("http://", "https://")):
        raise ValueError("URL must start with http:// or https://")

    try:
        # Make request with timeout and browser-like headers.
        # Fix: use the response as a context manager so the streamed
        # connection is always released, even when we bail out early on
        # oversized content (stream=True otherwise leaks the connection).
        with requests.get(
            url,
            timeout=config.WEB_TIMEOUT,
            headers=WEB_HEADERS,
            stream=True,  # Stream to check size first
            allow_redirects=True,  # Follow redirects (including moved repos)
        ) as response:
            response.raise_for_status()

            # Check declared content size before downloading the body
            content_length = response.headers.get("Content-Length")
            if content_length and int(content_length) > config.MAX_WEB_SIZE:
                raise ValueError(
                    f"Content too large: {int(content_length):,} bytes "
                    f"(max {config.MAX_WEB_SIZE:,} bytes)"
                )

            # Read content with size limit (Content-Length may be absent or lie)
            content = b""
            for chunk in response.iter_content(chunk_size=8192):
                content += chunk
                if len(content) > config.MAX_WEB_SIZE:
                    raise ValueError(f"Content exceeds size limit ({config.MAX_WEB_SIZE:,} bytes)")

            # Get content type
            content_type = response.headers.get("Content-Type", "").lower()

            # Extract text based on content type
            if extract_text:
                if "pdf" in content_type:
                    # Extract text from PDF
                    try:
                        text_content = extract_text_from_pdf(content, source=url)
                    except ValueError as e:
                        # Return helpful error message if extraction fails
                        text_content = f"[{e}]"
                elif "wordprocessingml" in content_type or "msword" in content_type:
                    # Extract text from DOCX (or DOC if saved as docx)
                    try:
                        text_content = extract_text_from_docx(content, source=url)
                    except ValueError as e:
                        text_content = f"[{e}]"
                elif "presentationml" in content_type or "ms-powerpoint" in content_type:
                    # Extract text from PPTX (or PPT if saved as pptx)
                    try:
                        text_content = extract_text_from_pptx(content, source=url)
                    except ValueError as e:
                        text_content = f"[{e}]"
                elif "html" in content_type:
                    # Extract text from HTML
                    text_content = content.decode(response.encoding or "utf-8", errors="replace")
                    soup = BeautifulSoup(text_content, "html.parser")

                    # Remove script and style elements
                    for element in soup(["script", "style", "nav", "footer", "header"]):
                        element.decompose()

                    # Get text
                    text = soup.get_text()

                    # Clean up whitespace
                    lines = (line.strip() for line in text.splitlines())
                    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
                    text_content = "\n".join(chunk for chunk in chunks if chunk)
                else:
                    # For other content types, check if it's a known binary format
                    binary_formats = [
                        "image/",
                        "video/",
                        "audio/",
                        "application/zip",
                        "application/x-zip",
                        "application/x-rar",
                        "application/x-tar",
                        "spreadsheetml",  # Excel files (xlsx) - not yet supported
                        "ms-excel",  # Legacy Excel files (xls) - not yet supported
                        "application/octet-stream",
                    ]
                    is_binary = any(fmt in content_type for fmt in binary_formats)

                    if is_binary:
                        text_content = (
                            f"[WARNING: Unsupported binary format]\n\n"
                            f"Content-Type: {content_type}\n"
                            f"URL: {url}\n\n"
                            f"This appears to be a binary file format that cannot be extracted as text.\n"
                            f"Supported formats: HTML, PDF, DOCX, PPTX, plain text, JSON, XML.\n"
                            f"To access this content, download it locally or use a format-specific tool."
                        )
                    else:
                        # Assume it's text-based (JSON, XML, CSV, etc.)
                        text_content = content.decode(response.encoding or "utf-8", errors="replace")
            else:
                # No text extraction - just decode
                text_content = content.decode(response.encoding or "utf-8", errors="replace")

        # Note: Output truncation is handled by universal MAX_TOOL_OUTPUT_CHARS limit in agent.py
        audit_logger.info(f"WEB_FETCH: {url} ({len(text_content)} chars)")
        return text_content

    except requests.Timeout as e:
        raise ValueError(f"Request timed out after {config.WEB_TIMEOUT} seconds") from e
    except requests.RequestException as e:
        raise ValueError(f"Failed to fetch URL: {e}") from e
    except ValueError:
        # Fix: re-raise our own size-limit ValueErrors unchanged; previously
        # the generic handler below re-wrapped them as "Error processing content".
        raise
    except Exception as e:
        raise ValueError(f"Error processing content: {e}") from e

TODO Management

todo_add

patchpal.tools.todo_tools.todo_add(description, details='')

Add a new task to the TODO list.

Use this to break down complex tasks into manageable subtasks. Each task gets a unique ID for tracking and completion.

Parameters:

Name Type Description Default
description str

Brief task description (one line)

required
details str

Optional detailed notes about the task

''

Returns:

Type Description
str

Confirmation with the task ID

Example

todo_add("Read authentication module", details="Focus on session handling logic") todo_add("Add input validation to login endpoint")

Source code in patchpal/tools/todo_tools.py
def todo_add(description: str, details: str = "") -> str:
    """
    Add a new task to the TODO list.

    Use this to break down complex tasks into manageable subtasks.
    Each task gets a unique ID for tracking and completion.

    Args:
        description: Brief task description (one line)
        details: Optional detailed notes about the task

    Returns:
        Confirmation with the task ID

    Example:
        todo_add("Read authentication module", details="Focus on session handling logic")
        todo_add("Add input validation to login endpoint")
    """
    _operation_limiter.check_limit(f"todo_add({description[:30]}...)")

    data = _load_todos()

    # Build the new task record from the monotonically increasing ID counter
    new_task = {
        "id": data["next_id"],
        "description": description,
        "details": details,
        "completed": False,
        "created_at": datetime.now().isoformat(),
    }
    data["tasks"].append(new_task)
    data["next_id"] += 1

    _save_todos(data)

    # Assemble the confirmation message, including details when provided
    lines = [f"✓ Added task #{new_task['id']}: {description}"]
    if details:
        lines.append(f"  Details: {details}")

    audit_logger.info(f"TODO_ADD: #{new_task['id']} - {description[:50]}")
    return "\n".join(lines)

todo_list

patchpal.tools.todo_tools.todo_list(show_completed=False)

List all tasks in the TODO list.

Parameters:

Name Type Description Default
show_completed bool

If True, show completed tasks; if False, show only pending tasks (default: False)

False

Returns:

Type Description
str

Formatted list of tasks with IDs, status, and descriptions

Source code in patchpal/tools/todo_tools.py
def todo_list(show_completed: bool = False) -> str:
    """
    List all tasks in the TODO list.

    Args:
        show_completed: If True, show completed tasks; if False, show only pending tasks (default: False)

    Returns:
        Formatted list of tasks with IDs, status, and descriptions
    """
    _operation_limiter.check_limit("todo_list()")

    data = _load_todos()
    tasks = data["tasks"]

    if not tasks:
        return "No tasks in TODO list.\n\nUse todo_add() to create a new task plan."

    # Filter tasks based on show_completed
    if show_completed:
        display_tasks = tasks
        header = "TODO List (All Tasks):"
    else:
        display_tasks = [t for t in tasks if not t["completed"]]
        header = "TODO List (Pending Tasks):"
        if not display_tasks:
            return "No pending tasks. All tasks completed! ✓\n\nUse todo_list(show_completed=True) to see completed tasks."

    separator = "=" * 80

    lines = [header, separator]

    for task in display_tasks:
        status = "✓" if task["completed"] else "○"
        lines.append(f"\n{status} Task #{task['id']}: {task['description']}")

        if task.get("details"):
            # Indent details
            detail_lines = task["details"].split("\n")
            for line in detail_lines:
                lines.append(f"  {line}")

        # Show creation time; timestamps are best-effort display only.
        # Fix: catch only the parse/lookup errors fromisoformat can raise
        # instead of silently swallowing every exception.
        try:
            created = datetime.fromisoformat(task["created_at"])
            lines.append(f"  Created: {created.strftime('%Y-%m-%d %H:%M')}")
        except (KeyError, ValueError, TypeError):
            pass

        # Show completion time if completed
        if task["completed"] and task.get("completed_at"):
            try:
                completed = datetime.fromisoformat(task["completed_at"])
                lines.append(f"  Completed: {completed.strftime('%Y-%m-%d %H:%M')}")
            except (ValueError, TypeError):
                pass

    # Summary
    total = len(tasks)
    completed = sum(1 for t in tasks if t["completed"])
    pending = total - completed

    lines.append(f"\n{separator}")
    lines.append(f"Summary: {pending} pending, {completed} completed, {total} total")

    audit_logger.info(f"TODO_LIST: {pending} pending, {completed} completed")
    return "\n".join(lines)

todo_complete

patchpal.tools.todo_tools.todo_complete(task_id)

Mark a task as completed.

Parameters:

Name Type Description Default
task_id int

The ID of the task to complete

required

Returns:

Type Description
str

Confirmation message

Example

todo_complete(1) # Mark task #1 as done

Source code in patchpal/tools/todo_tools.py
def todo_complete(task_id: int) -> str:
    """
    Mark a task as completed.

    Args:
        task_id: The ID of the task to complete

    Returns:
        Confirmation message

    Example:
        todo_complete(1)  # Mark task #1 as done
    """
    _operation_limiter.check_limit(f"todo_complete({task_id})")

    data = _load_todos()

    # Locate the task by ID (None when no match)
    task = next((t for t in data["tasks"] if t["id"] == task_id), None)

    if not task:
        available_ids = [t["id"] for t in data["tasks"]]
        return f"Task #{task_id} not found.\n\nAvailable task IDs: {available_ids}\n\nUse todo_list() to see all tasks."

    if task["completed"]:
        return f"Task #{task_id} is already completed: {task['description']}"

    # Mark as completed and record the completion timestamp
    task["completed"] = True
    task["completed_at"] = datetime.now().isoformat()

    _save_todos(data)

    # Report overall progress alongside the confirmation
    all_tasks = data["tasks"]
    done_count = sum(1 for t in all_tasks if t["completed"])

    result = f"✓ Completed task #{task_id}: {task['description']}"
    result += f"\n\nProgress: {done_count}/{len(all_tasks)} tasks completed"

    audit_logger.info(f"TODO_COMPLETE: #{task_id} - {task['description'][:50]}")
    return result

todo_update

patchpal.tools.todo_tools.todo_update(task_id, description=None, details=None)

Update a task's description or details.

Parameters:

Name Type Description Default
task_id int

The ID of the task to update

required
description str

New description (optional)

None
details str

New details (optional)

None

Returns:

Type Description
str

Confirmation message

Example

todo_update(1, description="Read auth module and session handling") todo_update(2, details="Need to check for SQL injection vulnerabilities")

Source code in patchpal/tools/todo_tools.py
def todo_update(task_id: int, description: str = None, details: str = None) -> str:
    """
    Update a task's description or details.

    Args:
        task_id: The ID of the task to update
        description: New description (optional)
        details: New details (optional)

    Returns:
        Confirmation message

    Example:
        todo_update(1, description="Read auth module and session handling")
        todo_update(2, details="Need to check for SQL injection vulnerabilities")
    """
    _operation_limiter.check_limit(f"todo_update({task_id})")

    # At least one field must be supplied, otherwise there is nothing to do
    if description is None and details is None:
        return "Error: Must provide either description or details to update"

    data = _load_todos()

    # Locate the task by ID (None when no match)
    task = next((t for t in data["tasks"] if t["id"] == task_id), None)

    if not task:
        available_ids = [t["id"] for t in data["tasks"]]
        return f"Task #{task_id} not found.\n\nAvailable task IDs: {available_ids}"

    # Apply the requested changes, keeping a human-readable change log
    changes = []
    if description is not None:
        previous = task["description"]
        task["description"] = description
        changes.append(f"Description: '{previous}' → '{description}'")

    if details is not None:
        task["details"] = details
        changes.append("Details updated")

    _save_todos(data)

    bullet_lines = [f"  • {change}" for change in changes]
    result = f"✓ Updated task #{task_id}\n" + "\n".join(bullet_lines)

    audit_logger.info(f"TODO_UPDATE: #{task_id} - {changes}")
    return result

todo_remove

patchpal.tools.todo_tools.todo_remove(task_id)

Remove a task from the TODO list.

Parameters:

Name Type Description Default
task_id int

The ID of the task to remove

required

Returns:

Type Description
str

Confirmation message

Example

todo_remove(1) # Remove task #1

Source code in patchpal/tools/todo_tools.py
def todo_remove(task_id: int) -> str:
    """
    Remove a task from the TODO list.

    Args:
        task_id: The ID of the task to remove

    Returns:
        Confirmation message

    Example:
        todo_remove(1)  # Remove task #1
    """
    _operation_limiter.check_limit(f"todo_remove({task_id})")

    data = _load_todos()

    # Locate the matching task's index, then pop it from the list
    match = next(
        ((idx, entry) for idx, entry in enumerate(data["tasks"]) if entry["id"] == task_id),
        None,
    )

    if match is None:
        available_ids = [t["id"] for t in data["tasks"]]
        return f"Task #{task_id} not found.\n\nAvailable task IDs: {available_ids}"

    idx, task = match
    data["tasks"].pop(idx)

    _save_todos(data)

    result = f"✓ Removed task #{task_id}: {task['description']}"
    remaining = len(data["tasks"])
    result += f"\n\n{remaining} task(s) remaining in TODO list"

    audit_logger.info(f"TODO_REMOVE: #{task_id} - {task['description'][:50]}")
    return result

todo_clear

patchpal.tools.todo_tools.todo_clear(completed_only=True)

Clear tasks from the TODO list.

Parameters:

Name Type Description Default
completed_only bool

If True, clear only completed tasks; if False, clear all tasks (default: True)

True

Returns:

Type Description
str

Confirmation message

Example

todo_clear() # Clear completed tasks todo_clear(completed_only=False) # Clear all tasks (start fresh)

Source code in patchpal/tools/todo_tools.py
def todo_clear(completed_only: bool = True) -> str:
    """
    Clear tasks from the TODO list.

    Args:
        completed_only: If True, clear only completed tasks; if False, clear all tasks (default: True)

    Returns:
        Confirmation message

    Example:
        todo_clear()              # Clear completed tasks
        todo_clear(completed_only=False)  # Clear all tasks (start fresh)
    """
    _operation_limiter.check_limit("todo_clear()")

    data = _load_todos()
    tasks = data["tasks"]

    if not tasks:
        return "TODO list is already empty."

    if completed_only:
        # Partition: keep the pending tasks, drop the completed ones
        pending_tasks = [t for t in tasks if not t["completed"]]
        count = len(tasks) - len(pending_tasks)
        if count == 0:
            return "No completed tasks to clear."

        data["tasks"] = pending_tasks
        _save_todos(data)

        result = f"✓ Cleared {count} completed task(s)"
        if pending_tasks:
            result += f"\n\n{len(pending_tasks)} pending task(s) remaining"
    else:
        # Wipe everything and start over
        count = len(tasks)
        data["tasks"] = []
        _save_todos(data)

        result = f"✓ Cleared all {count} task(s)\n\nTODO list is now empty. Use todo_add() to create a new task plan."

    audit_logger.info(f"TODO_CLEAR: {count} task(s) cleared (completed_only={completed_only})")
    return result

User Interaction

list_skills

patchpal.tools.user_interaction.list_skills()

List all available skills that can be invoked.

Skills are reusable workflows stored in: - Personal: ~/.patchpal/skills/ - Project: &lt;repo&gt;/.patchpal/skills/

Returns:

Type Description
str

Formatted list of available skills with names and descriptions

Source code in patchpal/tools/user_interaction.py
def list_skills() -> str:
    """
    List all available skills that can be invoked.

    Skills are reusable workflows stored in:
    - Personal: ~/.patchpal/skills/
    - Project: <repo>/.patchpal/skills/

    Returns:
        Formatted list of available skills with names and descriptions
    """
    _operation_limiter.check_limit("list_skills()")

    from patchpal.skills import list_skills as discover_all_skills

    skills = discover_all_skills(repo_root=common.REPO_ROOT)

    if not skills:
        return """No skills found.

To get started:
1. View examples: https://github.com/amaiya/patchpal/tree/main/examples/skills
2. Copy examples to your personal skills directory:
   mkdir -p ~/.patchpal/skills
   # Download and copy the commit and review skills from the examples folder
3. Or create your own skill in ~/.patchpal/skills/<skill-name>/SKILL.md

Skills are markdown files with YAML frontmatter. See the examples for the format."""

    # One name line plus an indented description line per skill, blank-line separated
    lines = [f"Available Skills ({len(skills)}):", "-" * 100]
    for entry in skills:
        lines.extend([f"  /{entry.name}", f"    {entry.description}", ""])

    lines += [
        "How to invoke skills:",
        "  - User types: /skill_name (e.g., /commit)",
        "  - Or just ask naturally and the agent will discover the right skill",
    ]

    audit_logger.info(f"LIST_SKILLS: {len(skills)} skill(s)")
    return "\n".join(lines)

use_skill

patchpal.tools.user_interaction.use_skill(skill_name, args='')

Invoke a skill with optional arguments.

Parameters:

Name Type Description Default
skill_name str

Name of the skill to invoke (without / prefix)

required
args str

Optional arguments to pass to the skill

''

Returns:

Type Description
str

The skill's instructions formatted with any provided arguments

Example

use_skill("commit", args="Fix bug in auth")

Source code in patchpal/tools/user_interaction.py
def use_skill(skill_name: str, args: str = "") -> str:
    """
    Invoke a skill with optional arguments.

    Args:
        skill_name: Name of the skill to invoke (without / prefix)
        args: Optional arguments to pass to the skill

    Returns:
        The skill's instructions formatted with any provided arguments

    Example:
        use_skill("commit", args="Fix bug in auth")
    """
    _operation_limiter.check_limit(f"use_skill({skill_name})")

    from patchpal.skills import get_skill

    skill = get_skill(skill_name, repo_root=common.REPO_ROOT)

    if not skill:
        # Unknown skill: show the catalog so the caller can pick a valid one
        available_skills = list_skills()
        return f"Skill not found: {skill_name}\n\n{available_skills}"

    # Append caller-supplied arguments to the skill's instructions, if any
    body = skill.instructions
    if args:
        body = f"{body}\n\nArguments: {args}"

    audit_logger.info(f"USE_SKILL: {skill_name} (args={args[:50]})")

    return f"Skill: {skill.name}\n\n{body}"

ask_user

patchpal.tools.user_interaction.ask_user(question, options=None)

Ask the user a question and wait for their response.

This allows the agent to interactively clarify requirements, get decisions, or gather additional information during task execution.

Parameters:

Name Type Description Default
question str

The question to ask the user

required
options Optional[list]

Optional list of predefined answer choices (e.g., ["yes", "no", "skip"]) If provided, user can select from these or type a custom answer

None

Returns:

Type Description
str

The user's answer as a string

Example

ask_user("Which authentication method should I use?", options=["JWT", "OAuth2", "Session"])
ask_user("Should I add error handling to all endpoints?")

Source code in patchpal/tools/user_interaction.py
def ask_user(question: str, options: Optional[list] = None) -> str:
    """
    Pose a question to the user on the console and block until answered.

    Lets the agent clarify requirements, request decisions, or collect
    extra information mid-task.

    Args:
        question: The question text shown to the user
        options: Optional list of predefined answer choices
                (e.g. ["yes", "no", "skip"]); the user may pick one by
                number, type one verbatim, or give a free-form answer

    Returns:
        The user's answer as a string

    Example:
        ask_user("Which authentication method should I use?", options=["JWT", "OAuth2", "Session"])
        ask_user("Should I add error handling to all endpoints?")
    """
    _operation_limiter.check_limit(f"ask_user({question[:30]}...)")

    # Lazy imports: only needed when the agent actually asks a question.
    from prompt_toolkit import prompt
    from prompt_toolkit.formatted_text import FormattedText
    from rich.console import Console
    from rich.markdown import Markdown
    from rich.panel import Panel

    console = Console()
    console.print()

    # Heuristic: render as markdown when typical markdown syntax is present.
    markdown_markers = ("```", "**", "##", "- ", "* ", "1. ")
    if any(marker in question for marker in markdown_markers):
        # Plain header instead of a panel so code blocks remain copyable.
        console.print("[bold cyan]Question from Agent:[/bold cyan]")
        console.print()
        console.print(Markdown(question))
        console.print()
    else:
        # Short plain-text questions go in a bordered panel.
        console.print(
            Panel(
                question,
                title="[bold cyan]Question from Agent[/bold cyan]",
                border_style="cyan",
                padding=(1, 2),
            )
        )

    # Shared prompt styling; prompt_toolkit gives proper readline support.
    answer_prompt = FormattedText([("ansibrightgreen bold", "Your answer:"), ("", " ")])

    if not options:
        # Free-form answer only.
        answer = prompt(answer_prompt).strip()
        console.print()
    else:
        # Show the numbered choices before prompting.
        console.print("\n[bold]Available options:[/bold]")
        for idx, choice in enumerate(options, 1):
            console.print(f"  {idx}. {choice}")
        console.print(
            "\n[dim]You can select a number, type an option, or provide a custom answer.[/dim]\n"
        )

        raw = prompt(answer_prompt).strip()
        answer = raw
        try:
            selected = int(raw)
        except ValueError:
            # Not numeric: treat the typed text as the answer itself.
            pass
        else:
            # In-range numbers map to the corresponding option; out-of-range
            # numbers fall through as a literal answer.
            if 1 <= selected <= len(options):
                answer = options[selected - 1]
                console.print(f"[dim]Selected: {answer}[/dim]\n")

    audit_logger.info(f"ASK_USER: Q: {question[:50]}... A: {answer[:50]}")
    return answer