--log option, closes #68

This commit is contained in:
Simon Willison 2023-07-11 20:18:16 -07:00
parent 05b4bcf57c
commit fa67b3fdaf
4 changed files with 31 additions and 9 deletions

View file

@@ -82,6 +82,7 @@ Options:
-p, --param <TEXT TEXT>... Parameters for template
--no-stream Do not stream output
-n, --no-log Don't log to database
--log Log prompt and response to the database
-c, --continue Continue the most recent conversation.
--cid, --conversation TEXT Continue the conversation with the given ID.
--key TEXT API key to use

View file

@@ -24,13 +24,16 @@ To turn logging by default off:
```bash
llm logs off
```
To turn it back on again:
If you've turned off logging you can still log an individual prompt and response by adding `--log`:
```bash
llm 'Five ambitious names for a pet pterodactyl' --log
```
To turn logging by default back on again:
```bash
llm logs on
```
To see the status of that database, run this:
To see the status of the logs database, run this:
```bash
llm logs status
```
@@ -38,8 +41,8 @@ Example output:
```
Logging is ON for all prompts
Found log database at /Users/simon/Library/Application Support/io.datasette.llm/logs.db
Number of conversations logged: 32
Number of responses logged: 47
Number of conversations logged: 33
Number of responses logged: 48
Database file size: 19.96MB
```

View file

@@ -80,6 +80,7 @@ def cli():
)
@click.option("--no-stream", is_flag=True, help="Do not stream output")
@click.option("-n", "--no-log", is_flag=True, help="Don't log to database")
@click.option("--log", is_flag=True, help="Log prompt and response to the database")
@click.option(
"_continue",
"-c",
@@ -105,6 +106,7 @@ def prompt(
param,
no_stream,
no_log,
log,
_continue,
conversation_id,
key,
@@ -115,6 +117,9 @@ def prompt(
Documentation: https://llm.datasette.io/en/stable/usage.html
"""
if log and no_log:
raise click.ClickException("--log and --no-log are mutually exclusive")
model_aliases = get_model_aliases()
def read_prompt():
@@ -247,7 +252,7 @@ def prompt(
raise click.ClickException(str(ex))
# Log to the database
if logs_on():
if (logs_on() or log) and not no_log:
log_path = logs_db_path()
db = sqlite_utils.Database(log_path)
migrate(db)

View file

@@ -65,8 +65,20 @@ def test_logs_path(monkeypatch, env, user_path):
@mock.patch.dict(os.environ, {"OPENAI_API_KEY": "X"})
@pytest.mark.parametrize("use_stdin", (True, False))
@pytest.mark.parametrize("logs_off", (True, False))
def test_llm_default_prompt(mocked_openai, use_stdin, user_path, logs_off):
@pytest.mark.parametrize(
"logs_off,logs_args,should_log",
(
(True, [], False),
(False, [], True),
(False, ["--no-log"], False),
(False, ["--log"], True),
(True, ["-n"], False), # Short for --no-log
(True, ["--log"], True),
),
)
def test_llm_default_prompt(
mocked_openai, use_stdin, user_path, logs_off, logs_args, should_log
):
# Reset the log_path database
log_path = user_path / "logs.db"
log_db = sqlite_utils.Database(str(log_path))
@@ -92,6 +104,7 @@ def test_llm_default_prompt(mocked_openai, use_stdin, user_path, logs_off):
input = prompt
else:
args.append(prompt)
args += logs_args
result = runner.invoke(cli, args, input=input, catch_exceptions=False)
assert result.exit_code == 0
assert result.output == "Bob, Alice, Eve\n"
@@ -100,7 +113,7 @@ def test_llm_default_prompt(mocked_openai, use_stdin, user_path, logs_off):
# Was it logged?
rows = list(log_db["responses"].rows)
if logs_off:
if not should_log:
assert len(rows) == 0
return