Schema template --save --schema support
* Don't hang on stdin if llm -t template-with-schema
* Docs on using schemas with templates
* Schema in template YAML file example
* Test for --save with --schema

Refs #778
parent f35ac31c21
commit a0845874ec
5 changed files with 61 additions and 6 deletions
@@ -26,6 +26,13 @@ You can also save default parameters:
llm --system 'Summarize this text in the voice of $voice' \
  --model gpt-4 -p voice GlaDOS --save summarize
```

Add `--schema` to bake a {ref}`schema <usage-schemas>` into your template:

```bash
llm --schema dog.schema.json 'invent a dog' --save dog
```

If you add `--extract`, the setting to {ref}`extract the first fenced code block <usage-extract-fenced-code>` will be persisted in the template.
```bash
llm --system 'write a Python function' --extract --save python-function
@@ -68,15 +75,18 @@ This will open the system default editor.

:::{tip}
You can control which editor will be used here using the `EDITOR` environment variable - for example, to use VS Code:

```bash
export EDITOR="code -w"
```
Add that to your `~/.zshrc` or `~/.bashrc` file depending on which shell you use (`zsh` is the default on macOS since macOS Catalina in 2019).
:::

You can also create a file called `summary.yaml` in the folder shown by running `llm templates path`, for example:
```bash
$ llm templates path
llm templates path
```
Example output:
```
/Users/simon/Library/Application Support/io.datasette.llm/templates
```
@@ -120,6 +130,26 @@ You can combine system and regular prompts like so:
system: You speak like an excitable Victorian adventurer
prompt: 'Summarize this: $input'
```

### Schemas
Use the `schema_object:` key to embed a JSON schema (as YAML) in your template. The easiest way to create these is with the `llm --schema ... --save name-of-template` command - the result should look something like this:

```yaml
name: dogs
schema_object:
  properties:
    dogs:
      items:
        properties:
          bio:
            type: string
          name:
            type: string
        type: object
      type: array
  type: object
```
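For illustration only (assuming PyYAML is installed, and reusing the `dog.schema.json` file from the earlier example): the embedded `schema_object` is simply the JSON schema re-expressed as YAML, so a template body like the one above can also be generated by hand from an existing schema file:

```python
import json

import yaml

# dog.schema.json is the JSON schema file referenced earlier in these docs.
with open("dog.schema.json") as f:
    schema = json.load(f)

# Emit a template body with the schema embedded; the name is an example value.
print(yaml.dump({"name": "dogs", "schema_object": schema}))
```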

### Additional template variables
@@ -152,7 +152,20 @@ llm --schema '{
  }
}' -m gpt-4o-mini 'invent two dogs'
```
The JSON returned from the model should match that schema.
LLM will pass this to the model, which should result in JSON returned from the model matching that schema.

You can also save the JSON schema to a file and reference the filename using `--schema`:

```bash
llm --schema dogs.schema.json 'invent two dogs'
```
Or save your schema {ref}`to a template <prompt-templates>` like this:

```bash
llm --schema dogs.schema.json --save dogs
# Then to use it:
llm -t dogs 'invent two dogs'
```

Be warned that different models may support different dialects of the JSON schema specification.
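Because support varies, it can be worth validating the returned JSON yourself. A minimal sketch, assuming the third-party `jsonschema` package is installed and the model's reply has been captured in a string:

```python
import json

from jsonschema import ValidationError, validate

# Same shape as the dogs schema used in these docs.
schema = {
    "type": "object",
    "properties": {
        "dogs": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "bio": {"type": "string"},
                },
            },
        }
    },
}

# `reply` stands in for the model's JSON output.
reply = '{"dogs": [{"name": "Cleo", "bio": "A thoughtful beagle"}]}'

try:
    validate(instance=json.loads(reply), schema=schema)
    print("Reply matches the schema")
except ValidationError as err:
    print("Schema mismatch:", err.message)
```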
@@ -299,7 +299,7 @@ def prompt(
    model_aliases = get_model_aliases()

    def read_prompt():
        nonlocal prompt
        nonlocal prompt, schema

        # Is there extra prompt available on stdin?
        stdin_prompt = None
@@ -318,6 +318,7 @@ def prompt(
            and sys.stdin.isatty()
            and not attachments
            and not attachment_types
            and not schema
        ):
            # Hang waiting for input to stdin (unless --save)
            prompt = sys.stdin.read()
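This is the fix for the first bullet in the commit message: with the extra `and not schema` check, the wait-for-typed-input branch is skipped whenever a schema is in play, so `llm -t template-with-schema` no longer hangs on stdin. A simplified sketch of that decision (the helper function and its name are illustrative, not part of cli.py):

```python
# Illustrative sketch of the condition above, including the new `schema` check.
def should_wait_for_typed_input(
    prompt, save, stdin_is_tty, attachments, attachment_types, schema
):
    # Only block waiting for the user to type a prompt when nothing else
    # (a --save, attachments, or a schema) can stand in for one.
    return (
        prompt is None
        and not save
        and stdin_is_tty
        and not attachments
        and not attachment_types
        and not schema
    )
```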
@@ -356,6 +357,8 @@ def prompt(
            to_save["extract"] = True
        if extract_last:
            to_save["extract_last"] = True
        if schema:
            to_save["schema_object"] = schema
        path.write_text(
            yaml.dump(
                to_save,
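For a sense of what this branch persists, here is an illustrative (not verbatim) reproduction of the saved YAML for a command such as `llm --schema '{"properties": {"name": {"type": "string"}}}' --save pet` (the template name is just an example), assuming PyYAML's default dump settings:

```python
import yaml

# Roughly what the --save branch above writes when --schema was provided.
to_save = {"schema_object": {"properties": {"name": {"type": "string"}}}}
print(yaml.dump(to_save), end="")
# schema_object:
#   properties:
#     name:
#       type: string
```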
@@ -374,6 +377,8 @@ def prompt(
        template_obj = load_template(template)
        extract = template_obj.extract
        extract_last = template_obj.extract_last
        if template_obj.schema_object:
            schema = template_obj.schema_object
        prompt = read_prompt()
        try:
            prompt, system = template_obj.evaluate(prompt, params)
@@ -12,6 +12,7 @@ class Template(BaseModel):
    # Should a fenced code block be extracted?
    extract: Optional[bool] = None
    extract_last: Optional[bool] = None
    schema_object: Optional[dict] = None

    model_config = ConfigDict(extra="forbid")
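As a rough sketch of how a saved template maps onto this model, assuming the class lives in `llm.templates` and also defines the usual `name`/`prompt`/`system` fields not shown in this hunk:

```python
import yaml

from llm.templates import Template  # assumed import path for the class above

saved = """
name: dogs
schema_object:
  type: object
  properties:
    dogs:
      type: array
"""

template = Template(**yaml.safe_load(saved))
print(template.schema_object["properties"]["dogs"]["type"])  # -> array

# Because of extra="forbid", an unexpected key in a template YAML file
# (say `schema:` instead of `schema_object:`) raises a validation error
# rather than being silently ignored.
```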
@@ -97,6 +97,12 @@ def test_templates_list(templates_path, args):
            {"system": "write python", "extract": True},
            None,
        ),
        # So should schemas
        (
            ["--schema", '{"properties": {"name": {"type": "string"}}}'],
            {"schema_object": {"properties": {"name": {"type": "string"}}}},
            None,
        ),
    ),
)
def test_templates_prompt_save(templates_path, args, expected_prompt, expected_error):