mirror of
https://github.com/Hopiu/llm.git
synced 2026-04-24 23:14:45 +00:00
llm models list --schemas option, closes #797
Also fixed bug where features showed even without --options, refs #796
This commit is contained in:
parent
4a7a1f19ed
commit
98cccd294a
4 changed files with 12 additions and 4 deletions
|
|
@@ -344,6 +344,7 @@ Usage: llm models list [OPTIONS]
|
|||
Options:
|
||||
--options Show options for each model, if available
|
||||
--async List async models
|
||||
--schemas List models that support schemas
|
||||
-q, --query TEXT Search for models matching these strings
|
||||
--help Show this message and exit.
|
||||
```
|
||||
|
|
|
|||
|
|
@@ -71,6 +71,11 @@ This example uses [uvx](https://docs.astral.sh/uv/guides/tools/) to run [strip-t
|
|||
|
||||
This will instruct the model to return an array of JSON objects with the specified structure, each containing a headline, summary, and array of key people mentioned.
|
||||
|
||||
For a list of available models that support schemas, run this command:
|
||||
```bash
|
||||
llm models --schemas
|
||||
```
|
||||
|
||||
(schemas-dsl)=
|
||||
|
||||
## Alternative schema syntax
|
||||
|
|
|
|||
|
|
@@ -721,7 +721,6 @@ OpenAI Completion: gpt-3.5-turbo-instruct (aliases: 3.5-instruct, chatgpt-instru
|
|||
Include the log probabilities of most likely N per token
|
||||
Features:
|
||||
- streaming
|
||||
Default: gpt-4o-mini
|
||||
|
||||
```
|
||||
<!-- [[[end]]] -->
|
||||
|
|
|
|||
|
|
@@ -1287,13 +1287,14 @@ _type_lookup = {
|
|||
"--options", is_flag=True, help="Show options for each model, if available"
|
||||
)
|
||||
@click.option("async_", "--async", is_flag=True, help="List async models")
|
||||
@click.option("--schemas", is_flag=True, help="List models that support schemas")
|
||||
@click.option(
|
||||
"-q",
|
||||
"--query",
|
||||
multiple=True,
|
||||
help="Search for models matching these strings",
|
||||
)
|
||||
def models_list(options, async_, query):
|
||||
def models_list(options, async_, schemas, query):
|
||||
"List available models"
|
||||
models_that_have_shown_options = set()
|
||||
for model_with_aliases in get_models_with_aliases():
|
||||
|
|
@@ -1303,6 +1304,8 @@ def models_list(options, async_, query):
|
|||
# Only show models where every provided query string matches
|
||||
if not all(model_with_aliases.matches(q) for q in query):
|
||||
continue
|
||||
if schemas and not model_with_aliases.model.supports_schema:
|
||||
continue
|
||||
extra = ""
|
||||
if model_with_aliases.aliases:
|
||||
extra = " (aliases: {})".format(", ".join(model_with_aliases.aliases))
|
||||
|
|
@@ -1346,12 +1349,12 @@ def models_list(options, async_, query):
|
|||
+ (["streaming"] if model.can_stream else [])
|
||||
+ (["schemas"] if model.supports_schema else [])
|
||||
)
|
||||
if features:
|
||||
if options and features:
|
||||
output += "\n Features:\n{}".format(
|
||||
"\n".join(" - {}".format(feature) for feature in features)
|
||||
)
|
||||
click.echo(output)
|
||||
if not query:
|
||||
if not query and not options and not schemas:
|
||||
click.echo(f"Default: {get_default_model()}")
|
||||
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue