--chain-limit option, closes #1025

This commit is contained in:
Simon Willison 2025-05-13 21:52:39 -04:00
parent 114d567da5
commit 2df619e7d8
3 changed files with 16 additions and 2 deletions

View file

@@ -131,6 +131,8 @@ Options:
functions to register as tools
--td, --tools-debug Show full details of tool executions
--ta, --tools-approve Manually approve every tool execution
--cl, --chain-limit INTEGER How many chained tool responses to allow,
default 5, set 0 for unlimited
-o, --option <TEXT TEXT>... key/value options for the model
--schema TEXT JSON schema, filepath or ID
--schema-multi TEXT JSON schema to use for multiple results

View file

@@ -362,6 +362,14 @@ def cli():
is_flag=True,
help="Manually approve every tool execution",
)
@click.option(
"chain_limit",
"--cl",
"--chain-limit",
type=int,
default=5,
help="How many chained tool responses to allow, default 5, set 0 for unlimited",
)
@click.option(
"options",
"-o",
@@ -438,6 +446,7 @@ def prompt(
python_tools,
tools_debug,
tools_approve,
chain_limit,
options,
schema_input,
schema_multi,
@@ -763,6 +772,7 @@ def prompt(
if tools or python_tools:
prompt_method = conversation.chain
kwargs["chain_limit"] = chain_limit
if tools_debug:
def debug_tool_call(_, tool_call, tool_result):

View file

@@ -326,6 +326,7 @@ class Conversation(_BaseConversation):
schema: Optional[Union[dict, type[BaseModel]]] = None,
tools: Optional[List[Tool]] = None,
tool_results: Optional[List[ToolResult]] = None,
chain_limit: Optional[int] = None,
before_call: Optional[Callable[[Tool, ToolCall], None]] = None,
after_call: Optional[Callable[[Tool, ToolCall, ToolResult], None]] = None,
details: bool = False,
@@ -353,6 +354,7 @@ class Conversation(_BaseConversation):
details=details,
before_call=before_call,
after_call=after_call,
chain_limit=chain_limit,
)
@classmethod
@@ -1067,7 +1069,7 @@ class _BaseChainResponse:
conversation: _BaseConversation,
key: Optional[str] = None,
details: bool = False,
chain_limit: int = 10,
chain_limit: Optional[int] = 10,
before_call: Optional[Callable[[Tool, ToolCall], None]] = None,
after_call: Optional[Callable[[Tool, ToolCall, ToolResult], None]] = None,
):
@@ -1101,7 +1103,7 @@ class _BaseChainResponse:
count += 1
yield response
self._responses.append(response)
if count > self.chain_limit:
if self.chain_limit and count > self.chain_limit:
raise ValueError(f"Chain limit of {self.chain_limit} exceeded. ")
# This could raise llm.CancelToolCall:
tool_results = response.execute_tool_calls(