diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index bd159f2..8fb4e36 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -11,6 +11,7 @@ jobs:
     strategy:
       matrix:
         python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
+        pydantic: ["==1.10.2", ">=2.0.0"]
     steps:
     - uses: actions/checkout@v3
     - name: Set up Python ${{ matrix.python-version }}
@@ -22,6 +23,7 @@ jobs:
     - name: Install dependencies
       run: |
         pip install -e '.[test]'
+        pip install 'pydantic${{ matrix.pydantic }}'
    - name: Run tests
      run: |
        pytest
diff --git a/llm/cli.py b/llm/cli.py
index 129d211..b310af8 100644
--- a/llm/cli.py
+++ b/llm/cli.py
@@ -868,7 +868,7 @@ def load_template(name):
         return Template(name=name, prompt=loaded)
     loaded["name"] = name
     try:
-        return Template.model_validate(loaded)
+        return Template(**loaded)
     except pydantic.ValidationError as ex:
         msg = "A validation error occurred:\n"
         msg += render_errors(ex.errors())
diff --git a/llm/models.py b/llm/models.py
index 585f000..6d0add9 100644
--- a/llm/models.py
+++ b/llm/models.py
@@ -143,7 +143,7 @@ class Response(ABC):
             "prompt_json": self._prompt_json,
             "options_json": {
                 key: value
-                for key, value in self.prompt.options.model_dump().items()
+                for key, value in dict(self.prompt.options).items()
                 if value is not None
             },
             "response": self.text(),
diff --git a/tests/test_cli_openai_models.py b/tests/test_cli_openai_models.py
index 6af7107..a47499f 100644
--- a/tests/test_cli_openai_models.py
+++ b/tests/test_cli_openai_models.py
@@ -51,14 +51,7 @@ def test_openai_options_min_max(mocked_models):
     for option, [min_val, max_val] in options.items():
         result = runner.invoke(cli, ["-m", "chatgpt", "-o", option, "-10"])
         assert result.exit_code == 1
-        assert (
-            result.output
-            == f"Error: {option}\n Input should be greater than or equal to {min_val}\n"
-        )
-
-        result = runner.invoke(cli, ["-m", "chatgpt", "-o", option, "10"])
-        assert result.exit_code == 1
-        assert (
-            result.output
-            == f"Error: {option}\n Input should be less than or equal to {max_val}\n"
-        )
+        assert f"greater than or equal to {min_val}" in result.output
+        result2 = runner.invoke(cli, ["-m", "chatgpt", "-o", option, "10"])
+        assert result2.exit_code == 1
+        assert f"less than or equal to {max_val}" in result2.output