test: Add Python test infrastructure and OpenAI wrapper tests #478

Open · wants to merge 13 commits into base: main
5 changes: 4 additions & 1 deletion .github/workflows/py.yaml
@@ -44,7 +44,7 @@ jobs:
       - name: Test whether the Python SDK can be installed
         run: |
           python -m pip install -e ./core/py[all]
-          python -m pip install -e ./py[all]
+          python -m pip install -e ./py[all,dev]
       - name: Test whether the Python SDK can be imported
         run: |
           python -c 'import braintrust'
@@ -55,3 +55,6 @@ jobs:
       - name: Lint the Python SDK
         run: |
           pylint --errors-only $(git ls-files '*.py')
+      - name: Run Python tests
+        run: |
+          source env.sh && python -m pytest py/tests/ -v
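
Note: the `dev` extra installed here presumably maps to the extras group amended in `py/setup.py` below, which adds the test dependencies (openai, pytest-asyncio, pytest-httpx, responses) that the new test step relies on.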
7 changes: 7 additions & 0 deletions Makefile
@@ -33,3 +33,10 @@ develop: ${VENV_PRE_COMMIT}

 fixup:
 	source env.sh && pre-commit run --all-files
+
+py-tests:
+	@if [ -z "$(tests)" ]; then \
+		source env.sh && python -m pytest py/tests/ -v; \
+	else \
+		source env.sh && python -m pytest $(tests) -v; \
+	fi
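
Invocation: `make py-tests` runs the full suite, while the command-line variable restricts it to a subset, e.g. `make py-tests tests=py/tests/test_oai/test_wrap_openai.py`.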
4 changes: 4 additions & 0 deletions py/setup.py
@@ -33,8 +33,12 @@
         "flake8-isort",
         "IPython",
         "isort==5.10.1",
+        "openai",
         "pre-commit",
         "pytest",
+        "pytest-asyncio>=0.23.0",
+        "pytest-httpx<=0.22.0",
+        "responses",
         "twine",
     ],
     "doc": ["pydoc-markdown"],
12 changes: 6 additions & 6 deletions py/src/braintrust/oai.py
@@ -121,7 +121,7 @@ def gen():
                     }
                 )
                 first = False
-                all_results.append(item if isinstance(item, dict) else item.dict())
+                all_results.append(item if isinstance(item, dict) else item.model_dump())
                 yield item

             span.log(**postprocess_streaming_results(all_results))
@@ -131,7 +131,7 @@ def gen():
             should_end = False
             return gen()
         else:
-            log_response = raw_response if isinstance(raw_response, dict) else raw_response.dict()
+            log_response = raw_response if isinstance(raw_response, dict) else raw_response.model_dump()
             span.log(
                 metrics={
                     "time_to_first_token": time.time() - start,
@@ -179,7 +179,7 @@ async def gen():
                     }
                 )
                 first = False
-                all_results.append(item if isinstance(item, dict) else item.dict())
+                all_results.append(item if isinstance(item, dict) else item.model_dump())
                 yield item

             span.log(**postprocess_streaming_results(all_results))
@@ -189,7 +189,7 @@ async def gen():
             should_end = False
             return gen()
         else:
-            log_response = raw_response if isinstance(raw_response, dict) else raw_response.dict()
+            log_response = raw_response if isinstance(raw_response, dict) else raw_response.model_dump()
             span.log(
                 metrics={
                     "tokens": log_response["usage"]["total_tokens"],
@@ -239,7 +239,7 @@ def create(self, *args, **kwargs):
         else:
             raw_response = create_response

-        log_response = raw_response if isinstance(raw_response, dict) else raw_response.dict()
+        log_response = raw_response if isinstance(raw_response, dict) else raw_response.model_dump()
         span.log(
             metrics={
                 "tokens": log_response["usage"]["total_tokens"],
@@ -263,7 +263,7 @@ async def acreate(self, *args, **kwargs):
             log_headers(create_response, span)
         else:
             raw_response = create_response
-        log_response = raw_response if isinstance(raw_response, dict) else raw_response.dict()
+        log_response = raw_response if isinstance(raw_response, dict) else raw_response.model_dump()
         span.log(
             metrics={
                 "tokens": log_response["usage"]["total_tokens"],
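
Context for the repeated `.dict()` → `.model_dump()` change: the openai v1 SDK returns Pydantic v2 models, where `.dict()` is deprecated in favor of `.model_dump()`. If the wrapper ever needs to tolerate both Pydantic generations, a version-agnostic helper is one option. This is only a sketch, and the `_to_dict` name is hypothetical:

```python
def _to_dict(response):
    """Best-effort conversion of an OpenAI response object to a plain dict.

    Handles raw dicts, Pydantic v2 models (model_dump), and
    legacy Pydantic v1 models (dict), in that order.
    """
    if isinstance(response, dict):
        return response
    if hasattr(response, "model_dump"):  # Pydantic v2 (openai>=1.x)
        return response.model_dump()
    return response.dict()  # Pydantic v1 fallback
```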
Empty file added py/tests/__init__.py
Empty file added py/tests/test_oai/__init__.py
103 changes: 103 additions & 0 deletions py/tests/test_oai/test_wrap_openai.py
@@ -0,0 +1,103 @@
import json
import logging
from inspect import iscoroutinefunction
from typing import TYPE_CHECKING

import httpx
import openai
import pytest
from braintrust.oai import wrap_openai
from openai.types import CompletionUsage
from openai.types.chat import ChatCompletion
from openai.types.chat.chat_completion import ChatCompletionMessage, Choice

if TYPE_CHECKING:
reveal_type = print # For type checking only
else:

def reveal_type(obj):
pass # No-op at runtime


logging.basicConfig(level=logging.DEBUG)


@pytest.fixture
def openai_client():
return openai.OpenAI(api_key="sk-test", base_url="https://api.openai.com/v1/")


@pytest.fixture
def mock_completion():
return {
"id": "test-id",
"object": "chat.completion",
"created": 1677652288,
"model": "gpt-3.5-turbo",
"choices": [
{
"index": 0,
"message": {"role": "assistant", "content": "Hello, how can I help you?"},
"finish_reason": "stop",
}
],
"usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
}


@pytest.fixture
def setup_responses(mock_completion, httpx_mock):
httpx_mock.add_response(
method="POST",
url="https://api.openai.com/v1/chat/completions",
json=mock_completion,
headers={"Content-Type": "application/json"},
status_code=200,
)
return httpx_mock


def test_wrap_openai_sync_types(openai_client):
wrapped = wrap_openai(openai_client)
reveal_type(wrapped) # type: ignore # Expected type: openai.OpenAI
reveal_type(wrapped.chat.completions) # type: ignore # Expected type: openai.resources.chat.completions.Completions
assert hasattr(wrapped.chat.completions, "create")
assert not hasattr(wrapped.chat.completions, "acreate")


@pytest.mark.asyncio
async def test_wrap_openai_async_types():
async_client = openai.AsyncOpenAI(
api_key="sk-test", base_url="https://api.openai.com/v1/", default_headers={"OpenAI-Version": "2020-10-01"}
)
wrapped = wrap_openai(async_client)
reveal_type(wrapped) # type: ignore # Expected type: openai.AsyncOpenAI
> Contributor comment: is this actually testing anything? I want the unit test, not a third-party tool, to do the testing.
reveal_type(wrapped.chat.completions) # type: ignore # Expected type: openai.resources.chat.completions.AsyncCompletions
assert hasattr(wrapped.chat.completions, "create")
assert iscoroutinefunction(wrapped.chat.completions.create)


def test_wrap_openai_sync_response_types(openai_client, mock_completion, setup_responses):
wrapped = wrap_openai(openai_client)
response = wrapped.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello"}])
reveal_type(response) # type: ignore # Expected type: openai.types.chat.ChatCompletion
reveal_type(response.choices[0]) # type: ignore # Expected type: openai.types.chat.chat_completion.Choice
reveal_type(response.usage) # type: ignore # Expected type: openai.types.completion_usage.CompletionUsage
assert isinstance(response, ChatCompletion)
assert isinstance(response.choices[0], Choice)
assert isinstance(response.usage, CompletionUsage)


@pytest.mark.asyncio
async def test_wrap_openai_async_response_types(mock_completion, setup_responses):
async_client = openai.AsyncOpenAI(api_key="sk-test", base_url="https://api.openai.com/v1/")
wrapped = wrap_openai(async_client)
response = await wrapped.chat.completions.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello"}]
)
reveal_type(response) # type: ignore # Expected type: openai.types.chat.ChatCompletion
reveal_type(response.choices[0]) # type: ignore # Expected type: openai.types.chat.chat_completion.Choice
reveal_type(response.usage) # type: ignore # Expected type: openai.types.completion_usage.CompletionUsage
assert isinstance(response, ChatCompletion)
assert isinstance(response.choices[0], Choice)
assert isinstance(response.usage, CompletionUsage)
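
The tests above pin down response types but never inspect the outgoing request. If that coverage is wanted, pytest-httpx records intercepted requests via `httpx_mock.get_requests()`. A sketch reusing the fixtures above (the test name is hypothetical):

```python
def test_wrap_openai_forwards_request_body(openai_client, setup_responses):
    wrapped = wrap_openai(openai_client)
    wrapped.chat.completions.create(
        model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello"}]
    )
    # pytest-httpx records every request the mock transport intercepted.
    (request,) = setup_responses.get_requests()
    body = json.loads(request.content)
    assert body["model"] == "gpt-3.5-turbo"
    assert body["messages"] == [{"role": "user", "content": "Hello"}]
```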
18 changes: 18 additions & 0 deletions pyproject.toml
@@ -4,3 +4,21 @@ line-length = 119
 [tool.ruff]
 line-length = 119
 select = ["I"]
+
+[tool.pytest.ini_options]
+asyncio_mode = "strict"
+asyncio_default_fixture_loop_scope = "function"
+log_cli = true
+log_cli_level = "DEBUG"
+
+[tool.mypy]
+python_version = "3.8"
+warn_return_any = true
+warn_unused_configs = true
+disallow_untyped_defs = true
+check_untyped_defs = true
+warn_redundant_casts = true
+warn_unused_ignores = true
+warn_no_return = true
+warn_unreachable = true
+show_error_codes = true
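
With `asyncio_mode = "strict"`, pytest-asyncio only collects coroutine tests that are explicitly marked, which is why every async test above carries `@pytest.mark.asyncio`.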