Ditch SimpleEcho entirely for Echo, closes #1061

This commit is contained in:
Simon Willison 2025-05-20 22:02:04 -07:00
parent bd2180df7d
commit 0ee1ba3a65
7 changed files with 94 additions and 67 deletions

View file

@@ -67,7 +67,7 @@ test = [
"types-click",
"types-PyYAML",
"types-setuptools",
"llm-echo==0.3a1",
"llm-echo==0.3a2",
]
[build-system]

View file

@@ -83,31 +83,6 @@ class MockModel(llm.Model):
)
class SimpleEchoModel(llm.Model):
model_id = "simple-echo"
can_stream = True
attachment_types = {"image/png"}
class Options(llm.Options):
example_int: Optional[int] = Field(
description="Example integer option", default=None
)
def execute(self, prompt, stream, response, conversation):
yield "system:\n{}\n\n".format(prompt.system or "")
yield "prompt:\n{}".format(prompt.prompt or "")
# Only show non-null options
non_null_options = {
k: v for k, v in prompt.options.model_dump().items() if v is not None
}
if non_null_options:
yield "\n\noptions: {}".format(json.dumps(non_null_options))
if prompt.attachments:
yield "\n\nattachments:\n"
for attachment in prompt.attachments:
yield f" - {attachment.url}\n"
class MockKeyModel(llm.KeyModel):
model_id = "mock_key"
needs_key = "mock"
@@ -234,20 +209,19 @@ def register_embed_demo_model(embed_demo, mock_model, async_mock_model):
@pytest.fixture(autouse=True)
def register_echo_models():
class EchoModelsPlugin:
__name__ = "EchoModelsPlugin"
def register_echo_model():
class EchoModelPlugin:
__name__ = "EchoModelPlugin"
@llm.hookimpl
def register_models(self, register):
register(SimpleEchoModel())
register(llm_echo.Echo(), llm_echo.EchoAsync())
pm.register(EchoModelsPlugin(), name="undo-EchoModelsPlugin")
pm.register(EchoModelPlugin(), name="undo-EchoModelPlugin")
try:
yield
finally:
pm.unregister(name="undo-EchoModelsPlugin")
pm.unregister(name="undo-EchoModelPlugin")
@pytest.fixture

View file

@@ -86,35 +86,53 @@ def test_prompt_uses_model_options(user_path):
path.write_text("{}", "utf-8")
# Prompt should not use an option
runner = CliRunner()
result = runner.invoke(cli, ["-m", "simple-echo", "prompt"])
result = runner.invoke(cli, ["-m", "echo", "prompt"])
assert result.exit_code == 0
assert result.output == "system:\n\n\nprompt:\nprompt\n"
# Now set an option
path.write_text(json.dumps({"simple-echo": {"example_int": 1}}), "utf-8")
assert json.loads(result.output) == {
"prompt": "prompt",
"system": "",
"attachments": [],
"stream": True,
"previous": [],
}
result2 = runner.invoke(cli, ["-m", "simple-echo", "prompt"])
# Now set an option
path.write_text(json.dumps({"echo": {"example_bool": True}}), "utf-8")
result2 = runner.invoke(cli, ["-m", "echo", "prompt"])
assert result2.exit_code == 0
assert (
result2.output
== 'system:\n\n\nprompt:\nprompt\n\noptions: {"example_int": 1}\n'
)
assert json.loads(result2.output) == {
"prompt": "prompt",
"system": "",
"attachments": [],
"stream": True,
"previous": [],
"options": {"example_bool": True},
}
# Option can be over-ridden
result3 = runner.invoke(
cli, ["-m", "simple-echo", "prompt", "-o", "example_int", "2"]
cli, ["-m", "echo", "prompt", "-o", "example_bool", "false"]
)
assert result3.exit_code == 0
assert (
result3.output
== 'system:\n\n\nprompt:\nprompt\n\noptions: {"example_int": 2}\n'
)
assert json.loads(result3.output) == {
"prompt": "prompt",
"system": "",
"attachments": [],
"stream": True,
"previous": [],
"options": {"example_bool": False},
}
# Using an alias should also pick up that option
aliases_path = user_path / "aliases.json"
aliases_path.write_text('{"e": "simple-echo"}', "utf-8")
aliases_path.write_text('{"e": "echo"}', "utf-8")
result4 = runner.invoke(cli, ["-m", "e", "prompt"])
assert result4.exit_code == 0
assert (
result4.output
== 'system:\n\n\nprompt:\nprompt\n\noptions: {"example_int": 1}\n'
)
assert json.loads(result4.output) == {
"prompt": "prompt",
"system": "",
"attachments": [],
"stream": True,
"previous": [],
"options": {"example_bool": True},
}

View file

@@ -625,7 +625,7 @@ def test_schema(mock_model, use_pydantic):
def test_model_environment_variable(monkeypatch):
monkeypatch.setenv("LLM_MODEL", "simple-echo")
monkeypatch.setenv("LLM_MODEL", "echo")
runner = CliRunner()
result = runner.invoke(
cli,
@@ -633,7 +633,13 @@ def test_model_environment_variable(monkeypatch):
catch_exceptions=False,
)
assert result.exit_code == 0
assert result.output == "system:\nsys\n\nprompt:\nhello\n"
assert json.loads(result.output) == {
"prompt": "hello",
"system": "sys",
"attachments": [],
"stream": False,
"previous": [],
}
@pytest.mark.parametrize("use_filename", (True, False))

View file

@@ -849,7 +849,7 @@ def test_logs_backup(logs_db):
assert not logs_db.tables
runner = CliRunner()
with runner.isolated_filesystem():
runner.invoke(cli, ["-m", "simple-echo", "simple prompt"])
runner.invoke(cli, ["-m", "echo", "simple prompt"])
assert logs_db.tables
expected_path = pathlib.Path("backup.db")
assert not expected_path.exists()

View file

@@ -141,11 +141,16 @@ def test_register_fragment_loaders(logs_db, httpx_mock):
# Test the CLI command
runner = CliRunner()
result = runner.invoke(
cli.cli, ["-m", "simple-echo", "-f", "three:x"], catch_exceptions=False
cli.cli, ["-m", "echo", "-f", "three:x"], catch_exceptions=False
)
assert result.exit_code == 0
expected = "prompt:\n" "one:x\n" "two:x\n" "three:x\n"
assert expected in result.output
assert json.loads(result.output) == {
"prompt": "one:x\ntwo:x\nthree:x",
"system": "",
"attachments": [],
"stream": True,
"previous": [],
}
# And the llm fragments loaders command:
result2 = runner.invoke(cli.cli, ["fragments", "loaders"])
assert result2.exit_code == 0
@@ -163,7 +168,7 @@ def test_register_fragment_loaders(logs_db, httpx_mock):
# Test the one that includes an attachment
result3 = runner.invoke(
cli.cli, ["-m", "simple-echo", "-f", "mixed:x"], catch_exceptions=False
cli.cli, ["-m", "echo", "-f", "mixed:x"], catch_exceptions=False
)
assert result3.exit_code == 0
result3.output.strip == textwrap.dedent(
@@ -297,7 +302,7 @@ def test_plugins_command():
result = runner.invoke(cli.cli, ["plugins"])
assert result.exit_code == 0
expected = [
{"name": "EchoModelsPlugin", "hooks": ["register_models"]},
{"name": "EchoModelPlugin", "hooks": ["register_models"]},
{
"name": "MockModelsPlugin",
"hooks": ["register_embedding_models", "register_models"],

View file

@@ -334,14 +334,29 @@ def test_execute_prompt_with_a_template(
@pytest.mark.parametrize(
"template,expected",
(
("system: system\nprompt: prompt", "system:\nsystem\n\nprompt:\nprompt"),
(
"system: system\nprompt: prompt",
{
"prompt": "prompt",
"system": "system",
"attachments": [],
"stream": True,
"previous": [],
},
),
(
"prompt: |\n This is\n ```\n code to extract\n ```",
"system:\n\n\nprompt:\nThis is\n```\ncode to extract\n```",
{
"prompt": "This is\n```\ncode to extract\n```",
"system": "",
"attachments": [],
"stream": True,
"previous": [],
},
),
# Now try that with extract: true
(
"extract: true\nprompt: |\n This is\n ```\n code to extract\n ```",
'extract: true\nprompt: |\n {"raw": "This is\\n```\\ncode to extract\\n```"}',
"code to extract",
),
),
@@ -356,11 +371,14 @@ def test_execute_prompt_from_template_url(httpx_mock, template, expected):
runner = CliRunner()
result = runner.invoke(
cli,
["-t", "https://example.com/prompt.yaml", "-m", "simple-echo"],
["-t", "https://example.com/prompt.yaml", "-m", "echo"],
catch_exceptions=False,
)
assert result.exit_code == 0
assert result.output.strip() == expected.strip()
if isinstance(expected, dict):
assert json.loads(result.output.strip()) == expected
else:
assert result.output.strip() == expected
def test_execute_prompt_from_template_path():
@@ -370,8 +388,14 @@ def test_execute_prompt_from_template_path():
path.write_text("system: system\nprompt: prompt", "utf-8")
result = runner.invoke(
cli,
["-t", str(path), "-m", "simple-echo"],
["-t", str(path), "-m", "echo"],
catch_exceptions=False,
)
assert result.exit_code == 0, result.output
assert result.output.strip() == "system:\nsystem\n\nprompt:\nprompt"
assert json.loads(result.output) == {
"prompt": "prompt",
"system": "system",
"attachments": [],
"stream": True,
"previous": [],
}