mirror of
https://github.com/Hopiu/llm.git
synced 2026-04-27 00:14:46 +00:00
LLM_OPENAI_SHOW_RESPONSES=1 debug trick, closes #286
This commit is contained in:
parent
4d18da4e11
commit
ae14ce4add
2 changed files with 20 additions and 0 deletions
|
|
@ -18,6 +18,16 @@ To run the tests:
|
|||
|
||||
pytest
|
||||
|
||||
## Debugging tricks
|
||||
|
||||
The default OpenAI plugin has a debugging mechanism for showing the exact responses that came back from the OpenAI API.
|
||||
|
||||
Set the `LLM_OPENAI_SHOW_RESPONSES` environment variable like this:
|
||||
```bash
|
||||
LLM_OPENAI_SHOW_RESPONSES=1 llm -m chatgpt 'three word slogan for an otter-run bakery'
|
||||
```
|
||||
This will output the response (including streaming responses) to standard error, as shown in [issue 286](https://github.com/simonw/llm/issues/286).
|
||||
|
||||
## Documentation
|
||||
|
||||
Documentation for this project uses [MyST](https://myst-parser.readthedocs.io/) - it is written in Markdown and rendered using Sphinx.
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ from llm.utils import dicts_to_table_string
|
|||
import click
|
||||
import datetime
|
||||
import openai
|
||||
import os
|
||||
|
||||
try:
|
||||
from pydantic import field_validator, Field # type: ignore
|
||||
|
|
@ -15,6 +16,15 @@ from typing import List, Iterable, Iterator, Optional, Union
|
|||
import json
|
||||
import yaml
|
||||
|
||||
if os.environ.get("LLM_OPENAI_SHOW_RESPONSES"):
    # Debug aid: when LLM_OPENAI_SHOW_RESPONSES is set, every raw response
    # from the OpenAI API is echoed to stderr via a requests response hook.

    def log_response(response, *args, **kwargs):
        """Echo the raw HTTP response body to stderr, then pass it through unchanged."""
        click.echo(response.text, err=True)
        return response

    # Give the openai client a dedicated session with the logging hook attached.
    debug_session = requests.Session()
    debug_session.hooks["response"].append(log_response)
    openai.requestssession = debug_session
|
||||
|
||||
|
||||
@hookimpl
|
||||
def register_models(register):
|
||||
|
|
|
|||
Loading…
Reference in a new issue