--cid/--conversation option, docs for search - refs #160

This commit is contained in:
Simon Willison 2023-08-17 14:32:43 -07:00
parent a2eab1b0f7
commit a4f55e9987
4 changed files with 79 additions and 22 deletions

View file

@@ -183,15 +183,16 @@ Usage: llm logs list [OPTIONS]
Show recent logged prompts and their responses
Options:
-n, --count INTEGER Number of entries to show - defaults to 3, use 0 for
all
-p, --path FILE Path to log database
-m, --model TEXT Filter by model or model alias
-q, --query TEXT Search for logs matching this string
-t, --truncate Truncate long strings in output
-c, --conversation TEXT Show logs for this conversation ID
--json Output logs as JSON
--help Show this message and exit.
-n, --count INTEGER Number of entries to show - defaults to 3, use 0
for all
-p, --path FILE Path to log database
-m, --model TEXT Filter by model or model alias
-q, --query TEXT Search for logs matching this string
-t, --truncate Truncate long strings in output
-c, --current Show logs from the current conversation
--cid, --conversation TEXT Show logs for this conversation ID
--json Output logs as JSON
--help Show this message and exit.
```
### llm models --help
```

View file

@@ -54,7 +54,13 @@ You can view the logs using the `llm logs` command:
```bash
llm logs
```
This will output the three most recent logged items as a JSON array of objects.
This will output the three most recent logged items in Markdown format.
Add `--json` to get the log messages in JSON instead:
```bash
llm logs --json
```
Add `-n 10` to see the ten most recent items:
```bash
@@ -64,19 +70,39 @@ Or `-n 0` to see everything that has ever been logged:
```bash
llm logs -n 0
```
You can search the logs for a search term in the `prompt` or the `response` columns:
You can truncate the display of the prompts and responses using the `-t/--truncate` option. This can help make the JSON output more readable:
```bash
llm logs -n 5 -t --json
```
### Logs for a conversation
To view the logs for the most recent {ref}`conversation <conversation>` you have had with a model, use `-c`:
```bash
llm logs -c
```
To see logs for a specific conversation based on its ID, use `--cid ID` or `--conversation ID`:
```bash
llm logs --cid 01h82n0q9crqtnzmf13gkyxawg
```
### Searching the logs
You can search the logs for a search term in the `prompt` or the `response` columns.
```bash
llm logs -q 'cheesecake'
```
The most relevant terms will be shown at the bottom of the output.
### Filtering by model
You can filter to logs just for a specific model (or model alias) using `-m/--model`:
```bash
llm logs -m chatgpt
```
You can truncate the display of the prompts and responses using the `-t/--truncate` option:
```bash
llm logs -n 5 -t
```
This is useful for finding a conversation that you would like to continue.
### Browsing logs using Datasette
You can also use [Datasette](https://datasette.io/) to browse your logs like this:

View file

@@ -30,6 +30,7 @@ Some models support options. You can pass these using `-o/--option name value` -
llm 'Ten names for cheesecakes' -o temperature 1.5
```
(conversation)=
## Continuing a conversation
By default, the tool will start a new conversation each time you run it.

View file

@@ -424,7 +424,16 @@ order by responses_fts.rank desc{limit}
@click.option("-q", "--query", help="Search for logs matching this string")
@click.option("-t", "--truncate", is_flag=True, help="Truncate long strings in output")
@click.option(
"current_conversation",
"-c",
"--current",
is_flag=True,
flag_value=-1,
help="Show logs from the current conversation",
)
@click.option(
"conversation_id",
"--cid",
"--conversation",
help="Show logs for this conversation ID",
)
@@ -434,7 +443,16 @@ order by responses_fts.rank desc{limit}
is_flag=True,
help="Output logs as JSON",
)
def logs_list(count, path, model, query, truncate, conversation, json_output):
def logs_list(
count,
path,
model,
query,
truncate,
current_conversation,
conversation_id,
json_output,
):
"Show recent logged prompts and their responses"
path = pathlib.Path(path or logs_db_path())
if not path.exists():
@@ -442,9 +460,20 @@ def logs_list(count, path, model, query, truncate, conversation, json_output):
db = sqlite_utils.Database(path)
migrate(db)
if current_conversation:
try:
conversation_id = next(
db.query(
"select conversation_id from responses order by id desc limit 1"
)
)["conversation_id"]
except StopIteration:
# No conversations yet
raise click.ClickException("No conversations found")
# For --conversation set limit 0, if not explicitly set
if count is None:
if conversation:
if conversation_id:
count = 0
else:
count = 3
@@ -474,8 +503,8 @@ def logs_list(count, path, model, query, truncate, conversation, json_output):
where_bits = []
if model_id:
where_bits.append("responses.model = :model")
if conversation:
where_bits.append("responses.conversation_id = :conversation")
if conversation_id:
where_bits.append("responses.conversation_id = :conversation_id")
if where_bits:
sql_format["extra_where"] = " where " + " and ".join(where_bits)
@@ -483,7 +512,7 @@ def logs_list(count, path, model, query, truncate, conversation, json_output):
rows = list(
db.query(
final_sql,
{"model": model_id, "query": query, "conversation": conversation},
{"model": model_id, "query": query, "conversation_id": conversation_id},
)
)
# Reverse the order - we do this because we 'order by id desc limit 3' to get the
@@ -524,7 +553,7 @@ def logs_list(count, path, model, query, truncate, conversation, json_output):
)
)
# In conversation log mode only show it for the first one
if conversation:
if conversation_id:
should_show_conversation = False
click.echo("## Prompt:\n\n{}".format(row["prompt"]))
if row["system"] != current_system: