Skip to content

Commit

Permalink
Merge branch 'feat/optimal-test-suite' into feat/v0.4
Browse files Browse the repository at this point in the history
  • Loading branch information
teocns committed Jan 15, 2025
2 parents 1025af0 + 2a860c8 commit 88ce5d5
Show file tree
Hide file tree
Showing 30 changed files with 1,367 additions and 263 deletions.
57 changes: 48 additions & 9 deletions .github/workflows/python-tests.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,19 @@
# :: Use nektos/act to run this locally
# :: Example:
# :: `act push -j python-tests --matrix python-version:3.10 --container-architecture linux/amd64`
# :: `act push -j unit-tests --matrix python-version:3.10 --container-architecture linux/amd64`
#
# This workflow runs two separate test suites:
# 1. Unit Tests (python-tests job):
# - Runs across Python 3.9 to 3.13
# - Located in tests/unit directory
# - Coverage report uploaded to Codecov for Python 3.11 only
#
# 2. Integration Tests (integration-tests job):
# - Runs only on Python 3.13
# - Located in tests/integration directory
#    - Runs with its own 5-minute timeout (same as unit tests)
# - Separate cache for dependencies

name: Python Tests
on:
workflow_dispatch: {}
Expand All @@ -23,10 +36,12 @@ on:
- 'tests/**/*.ipynb'

jobs:
python-tests:
unit-tests:
runs-on: ubuntu-latest
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AGENTOPS_API_KEY: ${{ secrets.AGENTOPS_API_KEY }}
PYTHONUNBUFFERED: "1"

strategy:
matrix:
Expand All @@ -49,14 +64,10 @@ jobs:
run: |
uv sync --group test --group dev
- name: Run tests with coverage
timeout-minutes: 10
- name: Run unit tests with coverage
timeout-minutes: 5
run: |
uv run -m pytest tests/ -v --cov=agentops --cov-report=xml
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AGENTOPS_API_KEY: ${{ secrets.AGENTOPS_API_KEY }}
PYTHONUNBUFFERED: "1"
uv run -m pytest tests/unit -v --cov=agentops --cov-report=xml
# Only upload coverage report for python3.11
- name: Upload coverage to Codecov
Expand All @@ -68,3 +79,31 @@ jobs:
flags: unittests
name: codecov-umbrella
fail_ci_if_error: true # Fail CI when the Codecov upload errors; relax only if Codecov flakiness starts blocking merges

integration-tests:
runs-on: ubuntu-latest
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AGENTOPS_API_KEY: ${{ secrets.AGENTOPS_API_KEY }}
PYTHONUNBUFFERED: "1"

steps:
- uses: actions/checkout@v4

- name: Setup UV
uses: astral-sh/setup-uv@v5
continue-on-error: true
with:
python-version: "3.13"
enable-cache: true
cache-suffix: uv-3.13-integration
cache-dependency-glob: "**/pyproject.toml"

- name: Install dependencies
run: |
uv sync --group test --group dev
- name: Run integration tests
timeout-minutes: 5
run: |
uv run pytest tests/integration
35 changes: 17 additions & 18 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -44,27 +44,28 @@ test = [
"openai>=1.0.0,<2.0.0",
"langchain",
"pytest-cov",
"fastapi[standard]",
]

dev = [
# Testing essentials
"pytest>=7.4.0,<8.0.0", # Testing framework with good async support
"pytest-depends", # For testing complex agent workflows
"pytest-asyncio", # Async test support for testing concurrent agent operations
"pytest-mock", # Mocking capabilities for isolating agent components
"pyfakefs", # File system testing
"pytest-recording", # Alternative to pytest-vcr with better Python 3.x support
"vcrpy @ git+https://github.com/kevin1024/vcrpy.git@81978659f1b18bbb7040ceb324a19114e4a4f328",
"pytest>=7.4.0,<8.0.0", # Testing framework with good async support
"pytest-depends", # For testing complex agent workflows
"pytest-asyncio", # Async test support for testing concurrent agent operations
"pytest-mock", # Mocking capabilities for isolating agent components
"pyfakefs", # File system testing
"pytest-recording", # Alternative to pytest-vcr with better Python 3.x support
"vcrpy @ git+https://github.com/kevin1024/vcrpy.git@5f1b20c4ca4a18c1fc8cfe049d7df12ca0659c9b",
# Code quality and type checking
"ruff", # Fast Python linter for maintaining code quality
"mypy", # Static type checking for better reliability
"types-requests", # Type stubs for requests library

"ruff", # Fast Python linter for maintaining code quality
"mypy", # Static type checking for better reliability
"types-requests", # Type stubs for requests library
# HTTP mocking and environment
"requests_mock>=1.11.0", # Mock HTTP requests for testing agent external communications
"python-dotenv", # Environment management for secure testing

"python-dotenv", # Environment management for secure testing
# Agent integration testing
"pytest-sugar>=1.0.0",
"pdbpp>=0.10.3",
]

# CI dependencies
Expand Down Expand Up @@ -97,11 +98,9 @@ max_line_length = 120

[tool.pytest.ini_options]
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function" # WARNING: Changing this may break tests. A `module`-scoped session might be faster, but also unstable.
test_paths = [
"tests",
]
addopts = "--tb=short -p no:warnings"
asyncio_default_fixture_loop_scope = "module" # WARNING: module scope is faster than "function" but may be unstable; revert to "function" if async tests start flaking.
testpaths = ["tests/unit"] # Default to unit tests
addopts = "--tb=short -p no:warnings --import-mode=importlib --ignore=tests/integration" # Ignore integration by default
pythonpath = ["."]
faulthandler_timeout = 30 # Reduced from 60
timeout = 60 # Reduced from 300
Expand Down
Empty file added tests/__init__.py
Empty file.
32 changes: 32 additions & 0 deletions tests/fixtures/event.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
from collections import defaultdict
from typing import TYPE_CHECKING

import pytest

if TYPE_CHECKING:
    from unittest.mock import MagicMock

    from pytest_mock import MockerFixture


@pytest.fixture(scope="function")
def llm_event_spy(agentops_client, mocker: "MockerFixture") -> dict[str, "MagicMock"]:
    """
    Provide spies on each LLM provider's ``handle_response`` method.

    Function-scoped so call records are reset for every test. To use it,
    simply pass it as an argument to the test function. Example:
    ```
    def test_my_test(llm_event_spy):
        # test code here
        llm_event_spy["litellm"].assert_called_once()
    ```

    Returns:
        Mapping of provider key ("litellm", "openai", "anthropic") to the
        ``MagicMock`` spy wrapping that provider instance's
        ``handle_response``. (``mocker.spy`` returns a ``MagicMock``, not a
        ``MockerFixture`` — the previous annotation was misleading.)
    """
    # Imported lazily so collecting tests does not require every provider
    # dependency to be importable.
    from agentops.llms.providers.anthropic import AnthropicProvider
    from agentops.llms.providers.litellm import LiteLLMProvider
    from agentops.llms.providers.openai import OpenAiProvider

    # NOTE(review): each spy is attached to a *fresh* provider instance, so it
    # only records calls routed through these instances — confirm the client
    # under test actually dispatches to them.
    return {
        "litellm": mocker.spy(LiteLLMProvider(agentops_client), "handle_response"),
        "openai": mocker.spy(OpenAiProvider(agentops_client), "handle_response"),
        "anthropic": mocker.spy(AnthropicProvider(agentops_client), "handle_response"),
    }
26 changes: 26 additions & 0 deletions tests/fixtures/packaging.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import builtins
import pytest


@pytest.fixture
def hide_available_pkg(monkeypatch):
    """
    Make the ``pkg`` package appear uninstalled by patching ``builtins.__import__``.

    Usage:
        @pytest.mark.usefixtures('hide_available_pkg')
        def test_message():
            with pytest.raises(ImportError, match='Install "pkg" to use test_function'):
                foo('test_function')
    Source:
        https://stackoverflow.com/questions/60227582/making-a-python-test-think-an-installed-package-is-not-available
    """
    real_import = builtins.__import__

    def fake_import(name, *args, **kwargs):
        # Everything except "pkg" is delegated to the genuine import machinery.
        if name != "pkg":
            return real_import(name, *args, **kwargs)
        raise ImportError()

    # monkeypatch restores the original __import__ automatically at teardown.
    monkeypatch.setattr(builtins, "__import__", fake_import)
Loading

0 comments on commit 88ce5d5

Please sign in to comment.