llm logs command, closes #3

This commit is contained in:
Simon Willison 2023-04-01 18:52:46 -07:00
parent fa99b6c340
commit 41e5c5481f
3 changed files with 81 additions and 0 deletions

View file

@ -60,6 +60,26 @@ To avoid logging a prompt, pass `--no-log` or `-n` to the command:
llm 'Ten names for cheesecakes' -n
### Viewing the logs
You can view the logs using the `llm logs` command:
llm logs
This will output the three most recent logged items as a JSON array of objects.
Add `-n 10` to see the ten most recent items:
llm logs -n 10
Or `-n 0` to see everything that has ever been logged:
llm logs -n 0
You can also use [Datasette](https://datasette.io/) to browse your logs like this:
datasette ~/.llm/log.db
## Help
For help, run:

View file

@ -1,6 +1,7 @@
import click
from click_default_group import DefaultGroup
import datetime
import json
import openai
import os
import sqlite_utils
@ -86,6 +87,28 @@ def init_db():
db.vacuum()
@cli.command()
@click.option(
    "-n",
    "--count",
    default=3,
    help="Number of entries to show - 0 for all",
)
@click.option(
    "-p",
    "--path",
    type=click.Path(readable=True, exists=True, dir_okay=False),
    help="Path to log database",
)
def logs(count, path):
    """Output the most recent logged entries as a JSON array (newest first)."""
    # Fall back to the default database location when no -p/--path was given.
    db_path = path or get_log_db_path()
    if not os.path.exists(db_path):
        raise click.ClickException("No log database found at {}".format(db_path))
    database = sqlite_utils.Database(db_path)
    # A count of 0 means "no limit" — show everything ever logged.
    limit = count if count else None
    entries = database["log"].rows_where(order_by="-rowid", limit=limit)
    click.echo(json.dumps(list(entries), indent=2))
def get_openai_api_key():
# Expand this to home directory / ~.openai-api-key.txt
if "OPENAI_API_KEY" in os.environ:

View file

@ -1,5 +1,8 @@
from click.testing import CliRunner
from llm.cli import cli
import json
import pytest
import sqlite_utils
def test_version():
@ -8,3 +11,38 @@ def test_version():
result = runner.invoke(cli, ["--version"])
assert result.exit_code == 0
assert result.output.startswith("cli, version ")
@pytest.fixture
def log_path(tmp_path):
    """Return the path to a temporary log database seeded with 100 identical rows."""
    db_file = str(tmp_path / "log.db")
    db = sqlite_utils.Database(db_file)
    # Build the rows up front, then bulk-insert them into the "log" table.
    rows = [
        {
            "command": "chatgpt",
            "system": "system",
            "prompt": "prompt",
            "response": "response",
            "model": "davinci",
        }
        for _ in range(100)
    ]
    db["log"].insert_all(rows)
    return db_file
@pytest.mark.parametrize("n", (None, 0, 2))
def test_logs(n, log_path):
    """The -n option controls entry count: omitted -> default 3, 0 -> all, k -> k."""
    cli_args = ["logs", "-p", log_path]
    if n is not None:
        cli_args += ["-n", str(n)]
    result = CliRunner().invoke(cli, cli_args)
    assert result.exit_code == 0
    entries = json.loads(result.output)
    # Map the parametrized -n value to the number of rows we expect back.
    if n is None:
        expected = 3
    elif n == 0:
        expected = 100
    else:
        expected = n
    assert len(entries) == expected