Add "llm logs on" / "llm logs off" commands for toggling prompt logging

Logging is controlled by the presence of a "logs-off" marker file in the
user directory: "llm logs off" touches it, "llm logs on" removes it, and
logs_on() reports its absence. "llm logs status" now echoes the current
state before the database summary.

Review fixes folded into this patch:
- prompt(): the rewritten logging guard dropped the existing --no-log
  opt-out (no_log was still accepted but ignored); restored it as
  "logs_on() and not no_log".
- logs_status(): removed no-op str.format() calls on literal messages.

diff --git a/docs/help.md b/docs/help.md
index eafe8ad..9f089bb 100644
--- a/docs/help.md
+++ b/docs/help.md
@@ -147,6 +147,8 @@ Options:
 
 Commands:
   list*   Show recent logged prompts and their responses
+  off     Turn off logging for all prompts
+  on      Turn on logging for all prompts
   path    Output the path to the logs.db file
   status  Show current status of database logging
 ```
@@ -165,6 +167,24 @@ Usage: llm logs status [OPTIONS]
 
   Show current status of database logging
 
+Options:
+  --help  Show this message and exit.
+```
+#### llm logs on --help
+```
+Usage: llm logs on [OPTIONS]
+
+  Turn on logging for all prompts
+
+Options:
+  --help  Show this message and exit.
+```
+#### llm logs off --help
+```
+Usage: llm logs off [OPTIONS]
+
+  Turn off logging for all prompts
+
 Options:
   --help  Show this message and exit.
 ```
diff --git a/llm/cli.py b/llm/cli.py
index f0500ae..d30e025 100644
--- a/llm/cli.py
+++ b/llm/cli.py
@@ -247,15 +247,11 @@ def prompt(
         raise click.ClickException(str(ex))
 
     # Log to the database
-    if no_log:
-        return
-
-    log_path = logs_db_path()
-    if not log_path.exists():
-        return
-    db = sqlite_utils.Database(log_path)
-    migrate(db)
-    response.log_to_db(db)
+    if logs_on() and not no_log:
+        log_path = logs_db_path()
+        db = sqlite_utils.Database(log_path)
+        migrate(db)
+        response.log_to_db(db)
 
 
 def load_conversation(conversation_id: Optional[str]) -> Optional[Conversation]:
@@ -358,6 +354,10 @@ def logs_status():
     if not path.exists():
         click.echo("No log database found at {}".format(path))
         return
+    if logs_on():
+        click.echo("Logging is ON for all prompts")
+    else:
+        click.echo("Logging is OFF")
     db = sqlite_utils.Database(path)
     migrate(db)
     click.echo("Found log database at {}".format(path))
@@ -368,6 +368,21 @@ def logs_status():
     )
 
 
+@logs.command(name="on")
+def logs_turn_on():
+    "Turn on logging for all prompts"
+    path = user_dir() / "logs-off"
+    if path.exists():
+        path.unlink()
+
+
+@logs.command(name="off")
+def logs_turn_off():
+    "Turn off logging for all prompts"
+    path = user_dir() / "logs-off"
+    path.touch()
+
+
 @logs.command(name="list")
 @click.option(
     "-n",
@@ -687,3 +702,7 @@ def _human_readable_size(size_bytes):
         i += 1
 
     return "{:.2f}{}".format(size_bytes, size_name[i])
+
+
+def logs_on():
+    return not (user_dir() / "logs-off").exists()
diff --git a/tests/test_llm.py b/tests/test_llm.py
index 0d7a381..e93b583 100644
--- a/tests/test_llm.py
+++ b/tests/test_llm.py
@@ -65,11 +65,25 @@ def test_logs_path(monkeypatch, env, user_path):
 
 @mock.patch.dict(os.environ, {"OPENAI_API_KEY": "X"})
 @pytest.mark.parametrize("use_stdin", (True, False))
-def test_llm_default_prompt(mocked_openai, use_stdin, user_path):
+@pytest.mark.parametrize("logs_off", (True, False))
+def test_llm_default_prompt(mocked_openai, use_stdin, user_path, logs_off):
     # Reset the log_path database
     log_path = user_path / "logs.db"
     log_db = sqlite_utils.Database(str(log_path))
     log_db["responses"].delete_where()
+
+    logs_off_path = user_path / "logs-off"
+    if logs_off:
+        # Turn off logging
+        assert not logs_off_path.exists()
+        CliRunner().invoke(cli, ["logs", "off"])
+        assert logs_off_path.exists()
+    else:
+        # Turn on logging
+        CliRunner().invoke(cli, ["logs", "on"])
+        assert not logs_off_path.exists()
+
+    # Run the prompt
     runner = CliRunner()
     prompt = "three names for a pet pelican"
     input = None
@@ -85,6 +99,11 @@ def test_llm_default_prompt(mocked_openai, use_stdin, user_path):
 
     # Was it logged?
     rows = list(log_db["responses"].rows)
+
+    if logs_off:
+        assert len(rows) == 0
+        return
+
     assert len(rows) == 1
     expected = {
         "model": "gpt-3.5-turbo",
@@ -109,6 +128,8 @@ def test_llm_default_prompt(mocked_openai, use_stdin, user_path):
     # Test "llm logs"
     log_result = runner.invoke(cli, ["logs", "-n", "1"], catch_exceptions=False)
     log_json = json.loads(log_result.output)
+
+    # Should have logged correctly:
     assert (
         log_json[0].items()
         >= {