Mirror of https://github.com/Hopiu/llm.git, synced 2026-04-29 17:34:45 +00:00
Capture tool calls from OpenAI streaming sync responses
Refs https://github.com/simonw/llm/issues/988#issuecomment-2869079084
This commit is contained in:
parent
7b450c8215
commit
7bc2f78156
2 changed files with 33 additions and 0 deletions
|
|
@@ -19,6 +19,7 @@ from .models import (
|
|||
Prompt,
|
||||
Response,
|
||||
Tool,
|
||||
ToolCall,
|
||||
)
|
||||
from .utils import schema_dsl, Fragment
|
||||
from .embeddings import Collection
|
||||
|
|
@@ -52,6 +53,7 @@ __all__ = [
|
|||
"Response",
|
||||
"Template",
|
||||
"Tool",
|
||||
"ToolCall",
|
||||
"user_dir",
|
||||
"schema_dsl",
|
||||
]
|
||||
|
|
|
|||
|
|
@@ -587,6 +587,18 @@ class _Shared:
|
|||
"type": "json_schema",
|
||||
"json_schema": {"name": "output", "schema": prompt.schema},
|
||||
}
|
||||
if prompt.tools:
|
||||
kwargs["tools"] = [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": tool.name,
|
||||
"description": tool.description or None,
|
||||
"parameters": tool.input_schema,
|
||||
},
|
||||
}
|
||||
for tool in prompt.tools
|
||||
]
|
||||
if stream:
|
||||
kwargs["stream_options"] = {"include_usage": True}
|
||||
return kwargs
|
||||
|
|
@@ -618,10 +630,19 @@ class Chat(_Shared, KeyModel):
|
|||
**kwargs,
|
||||
)
|
||||
chunks = []
|
||||
tool_calls = {}
|
||||
for chunk in completion:
|
||||
chunks.append(chunk)
|
||||
if chunk.usage:
|
||||
usage = chunk.usage.model_dump()
|
||||
if chunk.choices and chunk.choices[0].delta:
|
||||
for tool_call in chunk.choices[0].delta.tool_calls or []:
|
||||
index = tool_call.index
|
||||
if index not in tool_calls:
|
||||
tool_calls[index] = tool_call
|
||||
tool_calls[
|
||||
index
|
||||
].function.arguments += tool_call.function.arguments
|
||||
try:
|
||||
content = chunk.choices[0].delta.content
|
||||
except IndexError:
|
||||
|
|
@@ -629,6 +650,16 @@ class Chat(_Shared, KeyModel):
|
|||
if content is not None:
|
||||
yield content
|
||||
response.response_json = remove_dict_none_values(combine_chunks(chunks))
|
||||
if tool_calls:
|
||||
for value in tool_calls.values():
|
||||
# value.function looks like this:
|
||||
# ChoiceDeltaToolCallFunction(arguments='{"city":"San Francisco"}', name='get_weather')
|
||||
response.add_tool_call(
|
||||
llm.ToolCall(
|
||||
name=value.function.name,
|
||||
arguments=json.loads(value.function.arguments),
|
||||
)
|
||||
)
|
||||
else:
|
||||
completion = client.chat.completions.create(
|
||||
model=self.model_name or self.model_id,
|
||||
|
|
|
|||
Loading…
Reference in a new issue