mirror of
https://github.com/Hopiu/llm.git
synced 2026-04-27 00:14:46 +00:00
Stream by default, added --no-stream option, closes #25
Also finished the work needed to remove --code, refs #24
This commit is contained in:
parent
d72ac8779a
commit
6545ce9da6
4 changed files with 23 additions and 34 deletions
|
|
@ -4,13 +4,13 @@ The default command for this is `llm prompt` - you can use `llm` instead if you
|
|||
|
||||
## Executing a prompt
|
||||
|
||||
To run a prompt:
|
||||
To run a prompt, streaming tokens as they come in:
|
||||
|
||||
llm 'Ten names for cheesecakes'
|
||||
|
||||
To stream the results a token at a time:
|
||||
To disable streaming and only return the response once it has completed:
|
||||
|
||||
llm 'Ten names for cheesecakes' -s
|
||||
llm 'Ten names for cheesecakes' --no-stream
|
||||
|
||||
To switch from ChatGPT 3.5 (the default) to GPT-4 if you have access:
|
||||
|
||||
|
|
@ -52,10 +52,10 @@ This pattern of using `$(command)` inside a double quoted string is a useful way
|
|||
|
||||
You can use `--system '...'` to set a system prompt.
|
||||
|
||||
llm 'SQL to calculate total sales by month' -s \
|
||||
llm 'SQL to calculate total sales by month' \
|
||||
--system 'You are an exaggerated sentient cheesecake that knows SQL and talks about cheesecake a lot'
|
||||
|
||||
This is useful for piping content to standard input, for example:
|
||||
|
||||
curl -s 'https://simonwillison.net/2023/May/15/per-interpreter-gils/' | \
|
||||
llm --system 'Suggest topics for this post as a JSON array' --stream
|
||||
llm --system 'Suggest topics for this post as a JSON array'
|
||||
|
|
|
|||
35
llm/cli.py
35
llm/cli.py
|
|
@ -6,7 +6,6 @@ import openai
|
|||
import os
|
||||
import pathlib
|
||||
from platformdirs import user_data_dir
|
||||
import requests
|
||||
import sqlite_utils
|
||||
import sys
|
||||
import warnings
|
||||
|
|
@ -31,7 +30,7 @@ def cli():
|
|||
@click.option("--system", help="System prompt to use")
|
||||
@click.option("-4", "--gpt4", is_flag=True, help="Use GPT-4")
|
||||
@click.option("-m", "--model", help="Model to use")
|
||||
@click.option("-s", "--stream", is_flag=True, help="Stream output")
|
||||
@click.option("--no-stream", is_flag=True, help="Do not stream output")
|
||||
@click.option("-n", "--no-log", is_flag=True, help="Don't log to database")
|
||||
@click.option(
|
||||
"_continue",
|
||||
|
|
@ -48,7 +47,7 @@ def cli():
|
|||
type=int,
|
||||
)
|
||||
@click.option("--key", help="API key to use")
|
||||
def prompt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_id, key):
|
||||
def prompt(prompt, system, gpt4, model, no_stream, no_log, _continue, chat_id, key):
|
||||
"Execute a prompt against an OpenAI model"
|
||||
if prompt is None:
|
||||
# Read from stdin instead
|
||||
|
|
@ -78,7 +77,15 @@ def prompt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_id
|
|||
if model is None:
|
||||
model = history_model or DEFAULT_MODEL
|
||||
try:
|
||||
if stream:
|
||||
if no_stream:
|
||||
response = openai.ChatCompletion.create(
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
content = response.choices[0].message.content
|
||||
log(no_log, "openai", system, prompt, content, model, chat_id)
|
||||
print(content)
|
||||
else:
|
||||
response = []
|
||||
for chunk in openai.ChatCompletion.create(
|
||||
model=model,
|
||||
|
|
@ -92,16 +99,6 @@ def prompt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_id
|
|||
sys.stdout.flush()
|
||||
print("")
|
||||
log(no_log, "openai", system, prompt, "".join(response), model, chat_id)
|
||||
else:
|
||||
response = openai.ChatCompletion.create(
|
||||
model=model,
|
||||
messages=messages,
|
||||
)
|
||||
content = response.choices[0].message.content
|
||||
log(no_log, "openai", system, prompt, content, model, chat_id)
|
||||
if code:
|
||||
content = unwrap_markdown(content)
|
||||
print(content)
|
||||
except openai.error.AuthenticationError as ex:
|
||||
raise click.ClickException("{}: {}".format(ex.error.type, ex.error.code))
|
||||
except openai.error.OpenAIError as ex:
|
||||
|
|
@ -279,13 +276,3 @@ def get_history(chat_id):
|
|||
"rowid = ? or chat_id = ?", [chat_id, chat_id], order_by="rowid"
|
||||
)
|
||||
return chat_id, rows
|
||||
|
||||
|
||||
def unwrap_markdown(content):
|
||||
# Remove first and last line if they are triple backticks
|
||||
lines = [l for l in content.split("\n")]
|
||||
if lines[0].strip().startswith("```"):
|
||||
lines = lines[1:]
|
||||
if lines[-1].strip() == "```":
|
||||
lines = lines[:-1]
|
||||
return "\n".join(lines)
|
||||
|
|
|
|||
|
|
@ -59,21 +59,23 @@ def test_uses_correct_key(requests_mock, monkeypatch, tmpdir):
|
|||
|
||||
runner = CliRunner()
|
||||
# Called without --key uses environment variable
|
||||
result = runner.invoke(cli, ["hello"], catch_exceptions=False)
|
||||
result = runner.invoke(cli, ["hello", "--no-stream"], catch_exceptions=False)
|
||||
assert result.exit_code == 0
|
||||
assert_key("from-env")
|
||||
# Called without --key and with no environment variable uses keys.json
|
||||
monkeypatch.setenv("OPENAI_API_KEY", "")
|
||||
result2 = runner.invoke(cli, ["hello"], catch_exceptions=False)
|
||||
result2 = runner.invoke(cli, ["hello", "--no-stream"], catch_exceptions=False)
|
||||
assert result2.exit_code == 0
|
||||
assert_key("from-keys-file")
|
||||
# Called with --key name-in-keys.json uses that value
|
||||
result3 = runner.invoke(cli, ["hello", "--key", "other"], catch_exceptions=False)
|
||||
result3 = runner.invoke(
|
||||
cli, ["hello", "--key", "other", "--no-stream"], catch_exceptions=False
|
||||
)
|
||||
assert result3.exit_code == 0
|
||||
assert_key("other-key")
|
||||
# Called with --key something-else uses exactly that
|
||||
result4 = runner.invoke(
|
||||
cli, ["hello", "--key", "custom-key"], catch_exceptions=False
|
||||
cli, ["hello", "--key", "custom-key", "--no-stream"], catch_exceptions=False
|
||||
)
|
||||
assert result4.exit_code == 0
|
||||
assert_key("custom-key")
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ def test_llm_default_prompt(requests_mock, use_stdin):
|
|||
runner = CliRunner()
|
||||
prompt = "three names for a pet pelican"
|
||||
input = None
|
||||
args = []
|
||||
args = ["--no-stream"]
|
||||
if use_stdin:
|
||||
input = prompt
|
||||
else:
|
||||
|
|
|
|||
Loading…
Reference in a new issue