From 2df619e7d8bf1ad562e521a4d05b659e0b536381 Mon Sep 17 00:00:00 2001
From: Simon Willison
Date: Tue, 13 May 2025 21:52:39 -0400
Subject: [PATCH] --chain-limit option, closes #1025

---
 docs/help.md  |  2 ++
 llm/cli.py    | 10 ++++++++++
 llm/models.py |  6 ++++--
 3 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/docs/help.md b/docs/help.md
index 350afca..8c6f226 100644
--- a/docs/help.md
+++ b/docs/help.md
@@ -131,6 +131,8 @@ Options:
                                   functions to register as tools
   --td, --tools-debug             Show full details of tool executions
   --ta, --tools-approve           Manually approve every tool execution
+  --cl, --chain-limit INTEGER     How many chained tool responses to allow,
+                                  default 5, set 0 for unlimited
   -o, --option <TEXT TEXT>...     key/value options for the model
   --schema TEXT                   JSON schema, filepath or ID
   --schema-multi TEXT             JSON schema to use for multiple results
diff --git a/llm/cli.py b/llm/cli.py
index 4a63201..089c4c3 100644
--- a/llm/cli.py
+++ b/llm/cli.py
@@ -362,6 +362,14 @@ def cli():
     is_flag=True,
     help="Manually approve every tool execution",
 )
+@click.option(
+    "chain_limit",
+    "--cl",
+    "--chain-limit",
+    type=int,
+    default=5,
+    help="How many chained tool responses to allow, default 5, set 0 for unlimited",
+)
 @click.option(
     "options",
     "-o",
@@ -438,6 +446,7 @@ def prompt(
     python_tools,
     tools_debug,
     tools_approve,
+    chain_limit,
     options,
     schema_input,
     schema_multi,
@@ -763,6 +772,7 @@ def prompt(
 
     if tools or python_tools:
         prompt_method = conversation.chain
+        kwargs["chain_limit"] = chain_limit
     if tools_debug:
 
         def debug_tool_call(_, tool_call, tool_result):
diff --git a/llm/models.py b/llm/models.py
index 34fa775..68f9b39 100644
--- a/llm/models.py
+++ b/llm/models.py
@@ -326,6 +326,7 @@ class Conversation(_BaseConversation):
         schema: Optional[Union[dict, type[BaseModel]]] = None,
         tools: Optional[List[Tool]] = None,
         tool_results: Optional[List[ToolResult]] = None,
+        chain_limit: Optional[int] = None,
         before_call: Optional[Callable[[Tool, ToolCall], None]] = None,
         after_call: Optional[Callable[[Tool, ToolCall, ToolResult], None]] = None,
         details: bool = False,
@@ -353,6 +354,7 @@
             details=details,
             before_call=before_call,
             after_call=after_call,
+            chain_limit=chain_limit,
         )
 
     @classmethod
@@ -1067,7 +1069,7 @@ class _BaseChainResponse:
         conversation: _BaseConversation,
         key: Optional[str] = None,
         details: bool = False,
-        chain_limit: int = 10,
+        chain_limit: Optional[int] = 10,
         before_call: Optional[Callable[[Tool, ToolCall], None]] = None,
         after_call: Optional[Callable[[Tool, ToolCall, ToolResult], None]] = None,
     ):
@@ -1101,7 +1103,7 @@
             count += 1
             yield response
             self._responses.append(response)
-            if count > self.chain_limit:
+            if self.chain_limit and count > self.chain_limit:
                 raise ValueError(f"Chain limit of {self.chain_limit} exceeded. ")
             # This could raise llm.CancelToolCall:
             tool_results = response.execute_tool_calls(
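
Usage note (not part of the diff): a minimal sketch of the new chain_limit parameter through the Python API, assuming this patch is applied. The add() tool and the gpt-4.1-mini model ID are illustrative stand-ins, not anything the patch defines:

    import llm

    # Illustrative tool function; the model can call this during the chain.
    def add(a: int, b: int) -> int:
        "Add two integers."
        return a + b

    model = llm.get_model("gpt-4.1-mini")  # assumed model ID
    conversation = model.conversation()

    # Per the diff, chain() forwards chain_limit to the ChainResponse, where
    # the guard is `if self.chain_limit and count > self.chain_limit`; a
    # falsy value (0 from the CLI, or the Python-level default of None)
    # disables the cap entirely.
    chain_response = conversation.chain(
        "What is 1234 + 4567?",
        tools=[add],
        chain_limit=3,  # ValueError once more than 3 chained responses occur
    )
    print(chain_response.text())

From the command line the same cap is set with the new flag, for example: llm --functions functions.py --cl 3 'your prompt' (the functions.py file name is illustrative). Note the CLI defaults to 5 while chain() itself defaults to None, i.e. no limit, and --chain-limit 0 lifts the cap.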