Handle system prompts for completion models, refs #284

This commit is contained in:
Simon Willison 2023-09-18 22:36:38 -07:00
parent fcff36c6bc
commit 4eed871cf1
2 changed files with 16 additions and 6 deletions

View file

@@ -337,8 +337,13 @@ class Completion(Chat):
messages = []
if conversation is not None:
for prev_response in conversation.responses:
# We add system prompts in Markdown bold
if prev_response.prompt.system:
messages.append("**{}**".format(prev_response.prompt.system))
messages.append(prev_response.prompt.prompt)
messages.append(prev_response.text())
if prompt.system:
messages.append("**{}**".format(prompt.system))
messages.append(prompt.prompt)
response._prompt_json = {"messages": messages}
kwargs = self.build_kwargs(prompt)

View file

@@ -301,7 +301,8 @@ def test_openai_chat_stream(mocked_openai_chat_stream, user_path):
assert result.output == "Hi.\n"
def test_openai_completion(mocked_openai_completion, user_path):
@pytest.mark.parametrize("system", (None, "a system prompt"))
def test_openai_completion(mocked_openai_completion, user_path, system):
log_path = user_path / "logs.db"
log_db = sqlite_utils.Database(str(log_path))
log_db["responses"].delete_where()
@@ -315,28 +316,32 @@ def test_openai_completion(mocked_openai_completion, user_path):
"--no-stream",
"--key",
"x",
],
]
+ (["--system", system] if system else []),
catch_exceptions=False,
)
assert result.exit_code == 0
assert result.output == "\n\nThis is indeed a test\n"
# Should have requested 256 tokens
expected_prompt = "Say this is a test"
if system:
expected_prompt = "**a system prompt**\n" + expected_prompt
assert json.loads(mocked_openai_completion.last_request.text) == {
"model": "gpt-3.5-turbo-instruct",
"prompt": "Say this is a test",
"prompt": expected_prompt,
"stream": False,
"max_tokens": 256,
}
expected_messages = json.dumps(expected_prompt.split("\n"))
# Check it was logged
rows = list(log_db["responses"].rows)
assert len(rows) == 1
expected = {
"model": "gpt-3.5-turbo-instruct",
"prompt": "Say this is a test",
"system": None,
"prompt_json": '{"messages": ["Say this is a test"]}',
"system": system,
"prompt_json": '{"messages": ' + expected_messages + "}",
"options_json": "{}",
"response": "\n\nThis is indeed a test",
}