Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 46 additions & 0 deletions litellm/integrations/custom_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
AsyncGenerator,
Dict,
List,
NamedTuple,
Optional,
Tuple,
Union,
Expand Down Expand Up @@ -43,6 +44,9 @@
MCPPreCallRequestObject,
MCPPreCallResponseObject,
)
from litellm.types.llms.anthropic_messages.anthropic_response import (
AnthropicMessagesResponse,
)
from litellm.types.router import PreRoutingHookResponse

Span = Union[_Span, Any]
Expand All @@ -56,6 +60,7 @@
MCPDuringCallRequestObject = Any
MCPDuringCallResponseObject = Any
PreRoutingHookResponse = Any
AnthropicMessagesResponse = Any


_BASE64_INLINE_PATTERN = re.compile(
Expand All @@ -64,6 +69,19 @@
)


class ToolCallResult(NamedTuple):
    """Outcome of one tool invocation performed via async_execute_tool_calls.

    Instances are plain tuples, so field order is significant:
    (tool_call_id, content, is_error).
    """

    # Identifier of the tool_use block this result answers.
    tool_call_id: str
    # Text payload handed back to the model.
    content: str
    # True when the tool invocation failed.
    is_error: bool


class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callback#callback-class
# Class variables or attributes
def __init__(
Expand Down Expand Up @@ -533,6 +551,34 @@ async def async_post_mcp_tool_call_hook(
"""
return None

#########################################################
# TOOL EXECUTION HOOKS (simplified tool interception)
#########################################################

async def async_execute_tool_calls(
    self,
    response: Union["AnthropicMessagesResponse", ModelResponse],
    kwargs: Dict,
) -> List[ToolCallResult]:
    """Inspect the model response for tool calls, run them, report outcomes.

    One-step alternative to the paired
    async_should_run_agentic_loop / async_run_agentic_loop hooks: a
    callback is responsible only for spotting tool calls and producing
    their results, while the framework takes care of message
    construction, thinking block preservation, max_tokens adjustment,
    kwargs cleanup, and the follow-up API requests.

    The base implementation is a no-op; subclasses opt in by overriding.

    Args:
        response: Model response to inspect (an AnthropicMessagesResponse
            dict, or a ModelResponse).
        kwargs: Full request kwargs (includes custom_llm_provider, tools,
            etc.)

    Returns:
        One ToolCallResult per tool call this callback executed. An
        empty list means this callback handled nothing and should be
        skipped.
    """
    return []

#########################################################
# AGENTIC LOOP HOOKS (for litellm.messages + future completion support)
#########################################################
Expand Down
Loading
Loading