CLI now uses new prompt/stream methods
commit 52cedc03e0
parent ffe4b6706d

1 changed file with 10 additions and 9 deletions
llm/cli.py

```diff
@@ -192,20 +192,21 @@ def prompt(
     if model.needs_key and not model.key:
         model.key = get_key(key, model.needs_key, model.key_env_var)
 
-    prompt_kwargs = {}
-    if model.can_stream:
-        prompt_kwargs = {"stream": not no_stream}
+    should_stream = model.can_stream and not no_stream
+    if should_stream:
+        method = model.stream
     else:
-        no_stream = False
+        method = model.prompt
 
-    if no_stream:
-        chunk = list(model.prompt(prompt, system, **prompt_kwargs))[0]
-        print(chunk)
-    else:
-        for chunk in model.prompt(prompt, system, **prompt_kwargs):
+    response = method(prompt, system)
+
+    if should_stream:
+        for chunk in response:
             print(chunk, end="")
             sys.stdout.flush()
         print("")
+    else:
+        print(response.text())
 
     # TODO: Figure out OpenAI exception handling
     # TODO: Log to database
```
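For context, here is a minimal, self-contained sketch of the dispatch pattern this commit introduces. `FakeModel` and `FakeResponse` are invented stand-ins; only the names `can_stream`, `stream()`, `prompt()`, and `text()` are taken from the diff above, so treat this as an illustration of the shape of the new interface, not the library's actual classes.

```python
# Sketch of the new prompt/stream dispatch. FakeModel/FakeResponse are
# hypothetical stand-ins for illustration only.
import sys


class FakeResponse:
    """Stand-in for the response object the new methods return."""

    def __init__(self, chunks):
        self._chunks = chunks

    def __iter__(self):
        # Streaming path: yield output one chunk at a time.
        yield from self._chunks

    def text(self):
        # Non-streaming path: return the full output at once.
        return "".join(self._chunks)


class FakeModel:
    """Stand-in model exposing the interface the diff relies on."""

    can_stream = True

    def stream(self, prompt, system=None):
        return FakeResponse(["Hello", ", ", "world"])

    def prompt(self, prompt, system=None):
        return FakeResponse(["Hello, world"])


def run(model, prompt, system=None, no_stream=False):
    # Same dispatch as the new CLI code: pick one method up front,
    # then branch only on how the response is printed.
    should_stream = model.can_stream and not no_stream
    if should_stream:
        method = model.stream
    else:
        method = model.prompt
    response = method(prompt, system)
    if should_stream:
        for chunk in response:
            print(chunk, end="")
            sys.stdout.flush()
        print("")
    else:
        print(response.text())


run(FakeModel(), "Say hello")                  # streams chunk by chunk
run(FakeModel(), "Say hello", no_stream=True)  # prints all at once
```

The design point of the change is visible here: the method is selected once up front, so both paths share the single call `method(prompt, system)` instead of threading `**prompt_kwargs` through every `model.prompt(...)` call as the old code did.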