mirror of
https://github.com/Hopiu/llm.git
synced 2026-03-16 20:50:25 +00:00
parent
5f44f89de2
commit
3e3492898c
3 changed files with 66 additions and 7 deletions
|
|
@@ -464,6 +464,33 @@ response = conversation.prompt(
 Access `conversation.responses` for a list of all of the responses that have so far been returned during the conversation.
 
+### Conversations using tools
+
+You can pass a list of tool functions to the `tools=[]` argument when you start a new conversation:
+```python
+import llm
+
+def upper(text: str) -> str:
+    "convert text to upper case"
+    return text.upper()
+
+def reverse(text: str) -> str:
+    "reverse text"
+    return text[::-1]
+
+model = llm.get_model("gpt-4.1-mini")
+conversation = model.conversation(tools=[upper, reverse])
+```
+
+You can then call the `conversation.chain()` method multiple times to have a conversation that uses those tools:
+```python
+print(conversation.chain(
+    "Convert panda to uppercase and reverse it"
+).text())
+print(conversation.chain(
+    "Same with pangolin"
+).text())
+```
 
 (python-api-listing-models)=
 
 ## Listing models
|
|
|||
|
|
@@ -273,6 +273,7 @@ class _BaseConversation:
     id: str = field(default_factory=lambda: str(ULID()).lower())
     name: Optional[str] = None
     responses: List["_BaseResponse"] = field(default_factory=list)
+    tools: Optional[List[Tool]] = None
 
     @classmethod
     @abstractmethod
|
@@ -305,7 +306,7 @@ class Conversation(_BaseConversation):
                 attachments=attachments,
                 system=system,
                 schema=schema,
-                tools=tools,
+                tools=tools or self.tools,
                 tool_results=tool_results,
                 system_fragments=system_fragments,
                 options=self.model.Options(**options),
|
@@ -343,7 +344,7 @@ class Conversation(_BaseConversation):
                 attachments=attachments,
                 system=system,
                 schema=schema,
-                tools=tools,
+                tools=tools or self.tools,
                 tool_results=tool_results,
                 system_fragments=system_fragments,
                 model=self.model,
|
@@ -408,7 +409,7 @@ class AsyncConversation(_BaseConversation):
                 attachments=attachments,
                 system=system,
                 schema=schema,
-                tools=tools,
+                tools=tools or self.tools,
                 tool_results=tool_results,
                 system_fragments=system_fragments,
                 model=self.model,
|
@@ -1481,8 +1482,8 @@ class _BaseModel(ABC, _get_key_mixin):
 
 
 class _Model(_BaseModel):
-    def conversation(self) -> Conversation:
-        return Conversation(model=self)
+    def conversation(self, tools: Optional[List[Tool]] = None) -> Conversation:
+        return Conversation(model=self, tools=tools)
 
     def prompt(
         self,
|
@@ -1580,8 +1581,8 @@ class KeyModel(_Model):
 
 
 class _AsyncModel(_BaseModel):
-    def conversation(self) -> AsyncConversation:
-        return AsyncConversation(model=self)
+    def conversation(self, tools: Optional[List[Tool]] = None) -> AsyncConversation:
+        return AsyncConversation(model=self, tools=tools)
 
     def prompt(
         self,
|
|
|||
|
|
@@ -170,3 +170,34 @@ async def test_async_tools_run_tools_in_parallel():
     delta_ns = start_timestamps[1][1] - start_timestamps[0][1]
     # They should have run in parallel so it should be less than 0.02s difference
     assert delta_ns < (100_000_000 * 0.2)
+
+
+@pytest.mark.vcr
+def test_conversation_with_tools(vcr):
+    import llm
+
+    def add(a: int, b: int) -> int:
+        return a + b
+
+    def multiply(a: int, b: int) -> int:
+        return a * b
+
+    model = llm.get_model("echo")
+    conversation = model.conversation(tools=[add, multiply])
+
+    output1 = conversation.chain(
+        json.dumps(
+            {"tool_calls": [{"name": "multiply", "arguments": {"a": 5324, "b": 23233}}]}
+        )
+    ).text()
+    assert "123692492" in output1
+    output2 = conversation.chain(
+        json.dumps(
+            {
+                "tool_calls": [
+                    {"name": "add", "arguments": {"a": 841758375, "b": 123123}}
+                ]
+            }
+        )
+    ).text()
+    assert "841881498" in output2
|
|
|||
Loading…
Reference in a new issue