llm.get_models() and llm.get_async_models(), closes #640
parent: 845322e970
commit: c52cfee881
3 changed files with 46 additions and 2 deletions
@@ -18,7 +18,7 @@ model.key = "sk-..."
 response = model.prompt("Five surprising names for a pet pelican")
 print(response.text())
 ```
-The `llm.get_model()` function accepts model names or aliases. You can also omit it to use the currently configured default model, which is `gpt-4o-mini` if you have not changed the default.
+The `llm.get_model()` function accepts model IDs or aliases. You can also omit it to use the currently configured default model, which is `gpt-4o-mini` if you have not changed the default.
 
 In this example the key is set by Python code. You can also provide the key using the `OPENAI_API_KEY` environment variable, or use the `llm keys set openai` command to store it in a `keys.json` file, see {ref}`api-keys`.
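As the updated paragraph notes, `llm.get_model()` takes a model ID or alias and falls back to the configured default when called with no argument. A minimal sketch of both call styles (illustrative, not part of this commit; assumes the default model has not been changed):

```python
import llm

explicit = llm.get_model("gpt-4o-mini")  # look up by model ID or alias
default = llm.get_model()                # no argument: configured default
print(explicit.model_id, default.model_id)  # both print "gpt-4o-mini"
```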
@@ -35,7 +35,7 @@ llm models
 ```
 If you have set an `OPENAI_API_KEY` environment variable you can omit the `model.key = ` line.
 
-Calling `llm.get_model()` with an invalid model name will raise a `llm.UnknownModelError` exception.
+Calling `llm.get_model()` with an invalid model ID will raise a `llm.UnknownModelError` exception.
 
 (python-api-system-prompts)=
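The error behavior documented above can be exercised directly. A hedged sketch (the invalid ID here is made up; `UnknownModelError` subclasses `KeyError`, as the class shown in the code diff below confirms):

```python
import llm

try:
    llm.get_model("not-a-real-model-id")  # hypothetical invalid ID
except llm.UnknownModelError as ex:
    print("unknown model:", ex)
```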
@@ -99,6 +99,24 @@ print(response.text())
 ```
 Some models do not use API keys at all.
 
+### Listing models
+
+The `llm.get_models()` function returns a list of all available models, including those from plugins.
+
+```python
+import llm
+
+for model in llm.get_models():
+    print(model.model_id)
+```
+
+Use `llm.get_async_models()` to list async models:
+
+```python
+for model in llm.get_async_models():
+    print(model.model_id)
+```
+
 ### Streaming responses
 
 For models that support it you can stream responses as they are generated, like this:
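The new docs list sync and async models with two separate calls, which implies two separate registries: a model may be available in one and not the other. A quick way to compare them, illustrative only and not part of this commit:

```python
import llm

sync_ids = {model.model_id for model in llm.get_models()}
async_ids = {model.model_id for model in llm.get_async_models()}
print("sync only:", sorted(sync_ids - async_ids))
print("available in both:", sorted(sync_ids & async_ids))
```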
@@ -167,7 +167,18 @@ class UnknownModelError(KeyError):
     pass
 
 
+def get_models() -> List[Model]:
+    "Get all registered models"
+    return [model for model in get_model_aliases().values()]
+
+
+def get_async_models() -> List[AsyncModel]:
+    "Get all registered async models"
+    return [model for model in get_async_model_aliases().values()]
+
+
 def get_async_model(name: Optional[str] = None) -> AsyncModel:
     "Get an async model by name or alias"
     aliases = get_async_model_aliases()
     name = name or get_default_model()
     try:
@@ -186,6 +197,7 @@ def get_async_model(name: Optional[str] = None) -> AsyncModel:
 
 
 def get_model(name: Optional[str] = None, _skip_async: bool = False) -> Model:
     "Get a model by name or alias"
     aliases = get_model_aliases()
     name = name or get_default_model()
     try:
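One consequence of building these lists from the alias mappings: if `get_model_aliases()` maps both a model's ID and each of its aliases to the same `Model` object, as the lookup code in `get_model()` suggests, then `values()` can yield that model once per registered name. A sketch of how a caller could de-duplicate, illustrative only:

```python
import llm

# Key by object identity so each underlying Model appears once,
# even if it is registered under several aliases.
unique_models = list({id(model): model for model in llm.get_models()}.values())
print(len(llm.get_models()), "entries,", len(unique_models), "distinct models")
```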
@@ -596,3 +596,17 @@ def test_model_defaults(tmpdir, monkeypatch):
     assert config_path.exists()
     assert llm.get_default_model() == "gpt-4o"
     assert llm.get_model().model_id == "gpt-4o"
+
+
+def test_get_models():
+    models = llm.get_models()
+    assert all(isinstance(model, llm.Model) for model in models)
+    model_ids = [model.model_id for model in models]
+    assert "gpt-4o-mini" in model_ids
+
+
+def test_get_async_models():
+    models = llm.get_async_models()
+    assert all(isinstance(model, llm.AsyncModel) for model in models)
+    model_ids = [model.model_id for model in models]
+    assert "gpt-4o-mini" in model_ids
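A note on running these: the `tmpdir` and `monkeypatch` fixtures in the surrounding test suggest the suite uses pytest, so the new tests should be selectable with `pytest -k "test_get_models or test_get_async_models"`. Both `"gpt-4o-mini"` assertions assume the bundled OpenAI models are registered, consistent with the default-model documentation above.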