From 31b02ecfd6af4df45fbc5b39aa97205e6401143c Mon Sep 17 00:00:00 2001
From: Simon Willison
Date: Mon, 6 Nov 2023 10:17:36 -0800
Subject: [PATCH] Actually do the gpt-4-turbo work in a branch, refs #323

This reverts commit 2bfd039ff4c7972e11d8957a987d9c509733b4e4.
---
 llm/default_plugins/openai_models.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/llm/default_plugins/openai_models.py b/llm/default_plugins/openai_models.py
index 4b3ede6..d4947fa 100644
--- a/llm/default_plugins/openai_models.py
+++ b/llm/default_plugins/openai_models.py
@@ -31,13 +31,11 @@ def register_models(register):
     register(Chat("gpt-3.5-turbo"), aliases=("3.5", "chatgpt"))
     register(Chat("gpt-3.5-turbo-16k"), aliases=("chatgpt-16k", "3.5-16k"))
     register(Chat("gpt-4"), aliases=("4", "gpt4"))
-    register(Chat("gpt-4-turbo"), aliases=("4-turbo", "4t"))
     register(Chat("gpt-4-32k"), aliases=("4-32k",))
     register(
         Completion("gpt-3.5-turbo-instruct", default_max_tokens=256),
         aliases=("3.5-instruct", "chatgpt-instruct"),
     )
-
     # Load extra models
     extra_path = llm.user_dir() / "extra-openai-models.yaml"
     if not extra_path.exists():