From 73bbbec3722bca37ba329c7fdf7da9e408251ed3 Mon Sep 17 00:00:00 2001
From: Simon Willison <swillison@gmail.com>
Date: Mon, 13 May 2024 12:49:45 -0700
Subject: [PATCH] gpt-4o model, refs #490

---
 docs/aliases.md                      |  1 +
 docs/openai-models.md                | 14 +++++++++++---
 docs/usage.md                        | 10 ++++++++++
 llm/default_plugins/openai_models.py |  2 ++
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/docs/aliases.md b/docs/aliases.md
index b334cd8..85d52e5 100644
--- a/docs/aliases.md
+++ b/docs/aliases.md
@@ -29,6 +29,7 @@ gpt4 : gpt-4
 gpt-4-turbo : gpt-4-turbo-preview
 4-turbo : gpt-4-turbo-preview
 4t : gpt-4-turbo-preview
+4o : gpt-4o
 3.5-instruct : gpt-3.5-turbo-instruct
 chatgpt-instruct : gpt-3.5-turbo-instruct
 ada : ada-002 (embedding)
diff --git a/docs/openai-models.md b/docs/openai-models.md
index 62a45f9..a8d9ae4 100644
--- a/docs/openai-models.md
+++ b/docs/openai-models.md
@@ -23,16 +23,24 @@ Then paste in the API key.
 Run `llm models` for a full list of available models. The OpenAI models supported by LLM are:
+
 ```
 OpenAI Chat: gpt-3.5-turbo (aliases: 3.5, chatgpt)
-OpenAI Chat: gpt-3.5-turbo-16k (aliases: chatgpt-16k, 3.5-16k)
 OpenAI Chat: gpt-4 (aliases: 4, gpt4)
 OpenAI Chat: gpt-4-32k (aliases: 4-32k)
 OpenAI Chat: gpt-4-1106-preview
 OpenAI Chat: gpt-4-0125-preview
 OpenAI Chat: gpt-4-turbo-preview (aliases: gpt-4-turbo, 4-turbo, 4t)
-OpenAI Completion: gpt-3.5-turbo-instruct (aliases: 3.5-instruct, chatgpt-instruct, instruct)
-```
+OpenAI Chat: gpt-4o (aliases: 4o)
+OpenAI Completion: gpt-3.5-turbo-instruct (aliases: 3.5-instruct, chatgpt-instruct)
+```
+
 See [the OpenAI models documentation](https://platform.openai.com/docs/models) for details of each of these.
diff --git a/docs/usage.md b/docs/usage.md
index 9997464..e037186 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -315,6 +315,16 @@ OpenAI Chat: gpt-4-turbo-preview (aliases: gpt-4-turbo, 4-turbo, 4t)
   logit_bias: dict, str
   seed: int
   json_object: boolean
+OpenAI Chat: gpt-4o (aliases: 4o)
+  temperature: float
+  max_tokens: int
+  top_p: float
+  frequency_penalty: float
+  presence_penalty: float
+  stop: str
+  logit_bias: dict, str
+  seed: int
+  json_object: boolean
 OpenAI Completion: gpt-3.5-turbo-instruct (aliases: 3.5-instruct, chatgpt-instruct)
   temperature: float
     What sampling temperature to use, between 0 and 2. Higher values like
diff --git a/llm/default_plugins/openai_models.py b/llm/default_plugins/openai_models.py
index 88958e9..817919a 100644
--- a/llm/default_plugins/openai_models.py
+++ b/llm/default_plugins/openai_models.py
@@ -31,6 +31,8 @@ def register_models(register):
     register(Chat("gpt-4-1106-preview"))
     register(Chat("gpt-4-0125-preview"))
     register(Chat("gpt-4-turbo-preview"), aliases=("gpt-4-turbo", "4-turbo", "4t"))
+    # GPT-4o
+    register(Chat("gpt-4o"), aliases=("4o",))
     # The -instruct completion model
     register(
         Completion("gpt-3.5-turbo-instruct", default_max_tokens=256),