Skip to content

Commit 83c078f

Browse files
fix: adding missing async hooks (#33957)
* filling in missing async gaps
* using recommended tool runtime injection instead of injected state
* updating tests to use helper function as well
1 parent 26d39ff commit 83c078f

File tree

8 files changed

+326
-103
lines changed

8 files changed

+326
-103
lines changed

libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -353,3 +353,7 @@ def after_model(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | N
353353
last_ai_msg.tool_calls = revised_tool_calls
354354

355355
return {"messages": [last_ai_msg, *artificial_tool_messages]}
356+
357+
async def aafter_model(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
358+
"""Async trigger interrupt flows for relevant tool calls after an `AIMessage`."""
359+
return self.after_model(state, runtime)

libs/langchain_v1/langchain/agents/middleware/model_call_limit.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,29 @@ def before_model(self, state: ModelCallLimitState, runtime: Runtime) -> dict[str
198198

199199
return None
200200

201+
@hook_config(can_jump_to=["end"])
202+
async def abefore_model(
203+
self,
204+
state: ModelCallLimitState,
205+
runtime: Runtime,
206+
) -> dict[str, Any] | None:
207+
"""Async check model call limits before making a model call.
208+
209+
Args:
210+
state: The current agent state containing call counts.
211+
runtime: The langgraph runtime.
212+
213+
Returns:
214+
If limits are exceeded and exit_behavior is `'end'`, returns
215+
a `Command` to jump to the end with a limit exceeded message. Otherwise
216+
returns `None`.
217+
218+
Raises:
219+
ModelCallLimitExceededError: If limits are exceeded and `exit_behavior`
220+
is `'error'`.
221+
"""
222+
return self.before_model(state, runtime)
223+
201224
def after_model(self, state: ModelCallLimitState, runtime: Runtime) -> dict[str, Any] | None: # noqa: ARG002
202225
"""Increment model call counts after a model call.
203226
@@ -212,3 +235,19 @@ def after_model(self, state: ModelCallLimitState, runtime: Runtime) -> dict[str,
212235
"thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
213236
"run_model_call_count": state.get("run_model_call_count", 0) + 1,
214237
}
238+
239+
async def aafter_model(
240+
self,
241+
state: ModelCallLimitState,
242+
runtime: Runtime,
243+
) -> dict[str, Any] | None:
244+
"""Async increment model call counts after a model call.
245+
246+
Args:
247+
state: The current agent state.
248+
runtime: The langgraph runtime.
249+
250+
Returns:
251+
State updates with incremented call counts.
252+
"""
253+
return self.after_model(state, runtime)

libs/langchain_v1/langchain/agents/middleware/pii.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -252,6 +252,27 @@ def before_model(
252252

253253
return None
254254

255+
@hook_config(can_jump_to=["end"])
256+
async def abefore_model(
257+
self,
258+
state: AgentState,
259+
runtime: Runtime,
260+
) -> dict[str, Any] | None:
261+
"""Async check user messages and tool results for PII before model invocation.
262+
263+
Args:
264+
state: The current agent state.
265+
runtime: The langgraph runtime.
266+
267+
Returns:
268+
Updated state with PII handled according to strategy, or `None` if no PII
269+
detected.
270+
271+
Raises:
272+
PIIDetectionError: If PII is detected and strategy is `'block'`.
273+
"""
274+
return self.before_model(state, runtime)
275+
255276
def after_model(
256277
self,
257278
state: AgentState,
@@ -311,6 +332,26 @@ def after_model(
311332

312333
return {"messages": new_messages}
313334

335+
async def aafter_model(
336+
self,
337+
state: AgentState,
338+
runtime: Runtime,
339+
) -> dict[str, Any] | None:
340+
"""Async check AI messages for PII after model invocation.
341+
342+
Args:
343+
state: The current agent state.
344+
runtime: The langgraph runtime.
345+
346+
Returns:
347+
Updated state with PII handled according to strategy, or None if no PII
348+
detected.
349+
350+
Raises:
351+
PIIDetectionError: If PII is detected and strategy is `'block'`.
352+
"""
353+
return self.after_model(state, runtime)
354+
314355

315356
__all__ = [
316357
"PIIDetectionError",

libs/langchain_v1/langchain/agents/middleware/shell_tool.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -482,7 +482,7 @@ def before_agent(self, state: ShellToolState, runtime: Runtime) -> dict[str, Any
482482
return {"shell_session_resources": resources}
483483

484484
async def abefore_agent(self, state: ShellToolState, runtime: Runtime) -> dict[str, Any] | None:
485-
"""Async counterpart to `before_agent`."""
485+
"""Async start the shell session and run startup commands."""
486486
return self.before_agent(state, runtime)
487487

488488
def after_agent(self, state: ShellToolState, runtime: Runtime) -> None: # noqa: ARG002
@@ -494,7 +494,7 @@ def after_agent(self, state: ShellToolState, runtime: Runtime) -> None: # noqa:
494494
resources._finalizer()
495495

496496
async def aafter_agent(self, state: ShellToolState, runtime: Runtime) -> None:
497-
"""Async counterpart to `after_agent`."""
497+
"""Async run shutdown commands and release resources when an agent completes."""
498498
return self.after_agent(state, runtime)
499499

500500
def _ensure_resources(self, state: ShellToolState) -> _SessionResources:
@@ -689,7 +689,8 @@ async def awrap_tool_call(
689689
request: ToolCallRequest,
690690
handler: typing.Callable[[ToolCallRequest], typing.Awaitable[ToolMessage | Command]],
691691
) -> ToolMessage | Command:
692-
"""Async interception mirroring the synchronous tool handler."""
692+
"""Async intercept local shell tool calls and execute them via the managed session."""
693+
# The sync version already handles all the work, no need for async-specific logic
693694
if isinstance(request.tool, _PersistentShellTool):
694695
resources = self._ensure_resources(request.state)
695696
return self._run_shell_tool(

libs/langchain_v1/langchain/agents/middleware/tool_call_limit.py

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -451,3 +451,28 @@ def after_model(
451451
"run_tool_call_count": run_counts,
452452
"messages": artificial_messages,
453453
}
454+
455+
@hook_config(can_jump_to=["end"])
456+
async def aafter_model(
457+
self,
458+
state: ToolCallLimitState[ResponseT],
459+
runtime: Runtime[ContextT],
460+
) -> dict[str, Any] | None:
461+
"""Async increment tool call counts after a model call and check limits.
462+
463+
Args:
464+
state: The current agent state.
465+
runtime: The langgraph runtime.
466+
467+
Returns:
468+
State updates with incremented tool call counts. If limits are exceeded
469+
and exit_behavior is `'end'`, also includes a jump to end with a
470+
`ToolMessage` and AI message for the single exceeded tool call.
471+
472+
Raises:
473+
ToolCallLimitExceededError: If limits are exceeded and `exit_behavior`
474+
is `'error'`.
475+
NotImplementedError: If limits are exceeded, `exit_behavior` is `'end'`,
476+
and there are multiple tool calls.
477+
"""
478+
return self.after_model(state, runtime)

libs/partners/anthropic/langchain_anthropic/middleware/bash.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,18 @@ def wrap_model_call(
4040
request = request.override(tools=tools)
4141
return handler(request)
4242

43+
async def awrap_model_call(
44+
self,
45+
request: ModelRequest,
46+
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
47+
) -> ModelResponse:
48+
"""Async: ensure the Claude bash descriptor is available to the model."""
49+
tools = request.tools
50+
if all(tool is not _CLAUDE_BASH_DESCRIPTOR for tool in tools):
51+
tools = [*tools, _CLAUDE_BASH_DESCRIPTOR]
52+
request = request.override(tools=tools)
53+
return await handler(request)
54+
4355
def wrap_tool_call(
4456
self,
4557
request: ToolCallRequest,

0 commit comments

Comments (0)