mirror of
https://github.com/Hopiu/llm.git
synced 2026-03-16 20:50:25 +00:00
Fixed bug with model default options and aliases, closes #968
This commit is contained in:
parent
9a39af82cd
commit
16770611ca
3 changed files with 49 additions and 1 deletions
|
|
@ -667,7 +667,7 @@ def prompt(
|
|||
raise click.ClickException(render_errors(ex.errors()))
|
||||
|
||||
# Add on any default model options
|
||||
default_options = get_model_options(model_id)
|
||||
default_options = get_model_options(model.model_id)
|
||||
for key_, value in default_options.items():
|
||||
if key_ not in validated_options:
|
||||
validated_options[key_] = value
|
||||
|
|
|
|||
|
|
@ -86,9 +86,20 @@ class EchoModel(llm.Model):
|
|||
model_id = "echo"
|
||||
can_stream = True
|
||||
|
||||
class Options(llm.Options):
    # A single example option, used by the test suite to verify that
    # stored per-model default options are applied and overridable.
    example_int: Optional[int] = Field(
        default=None, description="Example integer option"
    )
|
||||
|
||||
def execute(self, prompt, stream, response, conversation):
    """Echo the prompt back to the caller.

    Yields the system text, then the prompt text, then — only if any
    option was explicitly set — a JSON dump of the non-null options.
    """
    yield f"system:\n{prompt.system or ''}\n\n"
    yield f"prompt:\n{prompt.prompt or ''}"
    # Drop options still at their None default so the echo reflects
    # only values the caller (or stored model defaults) supplied.
    set_options = {
        key: value
        for key, value in prompt.options.model_dump().items()
        if value is not None
    }
    if set_options:
        yield "\n\noptions: " + json.dumps(set_options)
|
||||
|
||||
|
||||
class MockKeyModel(llm.KeyModel):
|
||||
|
|
|
|||
|
|
@ -79,3 +79,40 @@ def test_model_options_clear(user_path):
|
|||
assert result2.exit_code == 0
|
||||
data = json.loads(path.read_text("utf-8"))
|
||||
assert data == {"gpt-4o": {"temperature": 0.7}}
|
||||
|
||||
|
||||
def test_prompt_uses_model_options(user_path):
    """Stored per-model default options should be applied to prompts,
    be overridable with -o, and be picked up when the model is resolved
    through an alias."""
    options_path = user_path / "model_options.json"
    options_path.write_text("{}", "utf-8")
    runner = CliRunner()

    # With no stored options the echo model reports none.
    first = runner.invoke(cli, ["-m", "echo", "prompt"])
    assert first.exit_code == 0
    assert first.output == "system:\n\n\nprompt:\nprompt\n"

    # Store a default option for the echo model; it should now apply.
    options_path.write_text(json.dumps({"echo": {"example_int": 1}}), "utf-8")
    second = runner.invoke(cli, ["-m", "echo", "prompt"])
    assert second.exit_code == 0
    assert (
        second.output
        == 'system:\n\n\nprompt:\nprompt\n\noptions: {"example_int": 1}\n'
    )

    # An explicit -o on the command line wins over the stored default.
    third = runner.invoke(cli, ["-m", "echo", "prompt", "-o", "example_int", "2"])
    assert third.exit_code == 0
    assert (
        third.output
        == 'system:\n\n\nprompt:\nprompt\n\noptions: {"example_int": 2}\n'
    )

    # Resolving the model through an alias must still apply its options.
    aliases_path = user_path / "aliases.json"
    aliases_path.write_text('{"e": "echo"}', "utf-8")
    fourth = runner.invoke(cli, ["-m", "e", "prompt"])
    assert fourth.exit_code == 0
    assert (
        fourth.output
        == 'system:\n\n\nprompt:\nprompt\n\noptions: {"example_int": 1}\n'
    )
|
||||
|
|
|
|||
Loading…
Reference in a new issue