From 50222ca2a6c680bb0e712b3bc8a1813d83fa55a0 Mon Sep 17 00:00:00 2001 From: Andrew Liu <159852527+aliu39@users.noreply.github.com> Date: Wed, 18 Dec 2024 20:33:36 -0800 Subject: [PATCH 1/7] feat(flags): Add integration for custom tracking of flag evaluations (#3860) * Add new integration and unit tests * Test flag values for LD and OF threaded/asyncio, not just flag names * update ffIntegration test to be e2e, and fix LRU copy bug * make a helper fixture and test error processor in original thread * Move api to top-level, rename to add_flag * Add docstrs * Rename to add_feature_flag * Rm extra import in test_lru_cache * Revert lru comment * Type annotate * Review comments * Update launchdarkly and openfeature tests to be e2e * Update docstrs * Skip threading test for <3.7 * Skip ffs asyncio test if 3.6 * undo 'skip threading test' * Try commenting out asyncio * Use importorskip * Import order --------- Co-authored-by: Anton Pirker --- sentry_sdk/integrations/featureflags.py | 44 ++++++ tests/conftest.py | 11 ++ tests/integrations/featureflags/__init__.py | 0 .../featureflags/test_featureflags.py | 133 ++++++++++++++++++ .../launchdarkly/test_launchdarkly.py | 119 +++++++++++++--- .../openfeature/test_openfeature.py | 113 ++++++++++++--- 6 files changed, 377 insertions(+), 43 deletions(-) create mode 100644 sentry_sdk/integrations/featureflags.py create mode 100644 tests/integrations/featureflags/__init__.py create mode 100644 tests/integrations/featureflags/test_featureflags.py diff --git a/sentry_sdk/integrations/featureflags.py b/sentry_sdk/integrations/featureflags.py new file mode 100644 index 0000000000..46947eec72 --- /dev/null +++ b/sentry_sdk/integrations/featureflags.py @@ -0,0 +1,44 @@ +from sentry_sdk.flag_utils import flag_error_processor + +import sentry_sdk +from sentry_sdk.integrations import Integration + + +class FeatureFlagsIntegration(Integration): + """ + Sentry integration for capturing feature flags on error events. To manually buffer flag data, + call `integrations.featureflags.add_feature_flag`. We recommend you do this on each flag + evaluation. + + See the [feature flag documentation](https://develop.sentry.dev/sdk/expected-features/#feature-flags) + for more information. + + @example + ``` + import sentry_sdk + from sentry_sdk.integrations.featureflags import FeatureFlagsIntegration, add_feature_flag + + sentry_sdk.init(dsn="my_dsn", integrations=[FeatureFlagsIntegration()]); + + add_feature_flag('my-flag', true); + sentry_sdk.capture_exception(Exception('broke')); // 'my-flag' should be captured on this Sentry event. + ``` + """ + + identifier = "featureflags" + + @staticmethod + def setup_once(): + # type: () -> None + scope = sentry_sdk.get_current_scope() + scope.add_error_processor(flag_error_processor) + + +def add_feature_flag(flag, result): + # type: (str, bool) -> None + """ + Records a flag and its value to be sent on subsequent error events by FeatureFlagsIntegration. + We recommend you do this on flag evaluations. Flags are buffered per Sentry scope. 
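+
+    A minimal usage sketch (assuming `FeatureFlagsIntegration` was passed to `sentry_sdk.init`):
+
+    ```
+    add_feature_flag("my-flag", True)
+    sentry_sdk.capture_exception(Exception("broke"))  # "my-flag" is attached to this event
+    ```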
+ """ + flags = sentry_sdk.get_current_scope().flags + flags.set(flag, result) diff --git a/tests/conftest.py b/tests/conftest.py index 64527c1e36..c0383d94b7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -184,6 +184,17 @@ def reset_integrations(): _installed_integrations.clear() +@pytest.fixture +def uninstall_integration(): + """Use to force the next call to sentry_init to re-install/setup an integration.""" + + def inner(identifier): + _processed_integrations.discard(identifier) + _installed_integrations.discard(identifier) + + return inner + + @pytest.fixture def sentry_init(request): def inner(*a, **kw): diff --git a/tests/integrations/featureflags/__init__.py b/tests/integrations/featureflags/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integrations/featureflags/test_featureflags.py b/tests/integrations/featureflags/test_featureflags.py new file mode 100644 index 0000000000..539e910607 --- /dev/null +++ b/tests/integrations/featureflags/test_featureflags.py @@ -0,0 +1,133 @@ +import concurrent.futures as cf +import sys + +import pytest + +import sentry_sdk +from sentry_sdk.integrations.featureflags import ( + FeatureFlagsIntegration, + add_feature_flag, +) + + +def test_featureflags_integration(sentry_init, capture_events, uninstall_integration): + uninstall_integration(FeatureFlagsIntegration.identifier) + sentry_init(integrations=[FeatureFlagsIntegration()]) + + add_feature_flag("hello", False) + add_feature_flag("world", True) + add_feature_flag("other", False) + + events = capture_events() + sentry_sdk.capture_exception(Exception("something wrong!")) + + assert len(events) == 1 + assert events[0]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": False}, + {"flag": "world", "result": True}, + {"flag": "other", "result": False}, + ] + } + + +def test_featureflags_integration_threaded( + sentry_init, capture_events, uninstall_integration +): + uninstall_integration(FeatureFlagsIntegration.identifier) + sentry_init(integrations=[FeatureFlagsIntegration()]) + events = capture_events() + + # Capture an eval before we split isolation scopes. + add_feature_flag("hello", False) + + def task(flag_key): + # Creates a new isolation scope for the thread. + # This means the evaluations in each task are captured separately. 
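+        # Flags evaluated before the fork (like "hello" above) are inherited by each new scope.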
+ with sentry_sdk.isolation_scope(): + add_feature_flag(flag_key, False) + # use a tag to identify to identify events later on + sentry_sdk.set_tag("task_id", flag_key) + sentry_sdk.capture_exception(Exception("something wrong!")) + + # Run tasks in separate threads + with cf.ThreadPoolExecutor(max_workers=2) as pool: + pool.map(task, ["world", "other"]) + + # Capture error in original scope + sentry_sdk.set_tag("task_id", "0") + sentry_sdk.capture_exception(Exception("something wrong!")) + + assert len(events) == 3 + events.sort(key=lambda e: e["tags"]["task_id"]) + + assert events[0]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": False}, + ] + } + assert events[1]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": False}, + {"flag": "other", "result": False}, + ] + } + assert events[2]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": False}, + {"flag": "world", "result": False}, + ] + } + + +@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires python3.7 or higher") +def test_featureflags_integration_asyncio( + sentry_init, capture_events, uninstall_integration +): + asyncio = pytest.importorskip("asyncio") + + uninstall_integration(FeatureFlagsIntegration.identifier) + sentry_init(integrations=[FeatureFlagsIntegration()]) + events = capture_events() + + # Capture an eval before we split isolation scopes. + add_feature_flag("hello", False) + + async def task(flag_key): + # Creates a new isolation scope for the thread. + # This means the evaluations in each task are captured separately. + with sentry_sdk.isolation_scope(): + add_feature_flag(flag_key, False) + # use a tag to identify to identify events later on + sentry_sdk.set_tag("task_id", flag_key) + sentry_sdk.capture_exception(Exception("something wrong!")) + + async def runner(): + return asyncio.gather(task("world"), task("other")) + + asyncio.run(runner()) + + # Capture error in original scope + sentry_sdk.set_tag("task_id", "0") + sentry_sdk.capture_exception(Exception("something wrong!")) + + assert len(events) == 3 + events.sort(key=lambda e: e["tags"]["task_id"]) + + assert events[0]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": False}, + ] + } + assert events[1]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": False}, + {"flag": "other", "result": False}, + ] + } + assert events[2]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": False}, + {"flag": "world", "result": False}, + ] + } diff --git a/tests/integrations/launchdarkly/test_launchdarkly.py b/tests/integrations/launchdarkly/test_launchdarkly.py index acbe764104..f66a4219ec 100644 --- a/tests/integrations/launchdarkly/test_launchdarkly.py +++ b/tests/integrations/launchdarkly/test_launchdarkly.py @@ -1,9 +1,7 @@ -import asyncio import concurrent.futures as cf +import sys import ldclient - -import sentry_sdk import pytest from ldclient import LDClient @@ -11,6 +9,7 @@ from ldclient.context import Context from ldclient.integrations.test_data import TestData +import sentry_sdk from sentry_sdk.integrations import DidNotEnable from sentry_sdk.integrations.launchdarkly import LaunchDarklyIntegration @@ -19,9 +18,13 @@ "use_global_client", (False, True), ) -def test_launchdarkly_integration(sentry_init, use_global_client): +def test_launchdarkly_integration( + sentry_init, use_global_client, capture_events, uninstall_integration +): td = TestData.data_source() config = Config("sdk-key", update_processor_class=td) + + 
uninstall_integration(LaunchDarklyIntegration.identifier) if use_global_client: ldclient.set_config(config) sentry_init(integrations=[LaunchDarklyIntegration()]) @@ -39,25 +42,38 @@ def test_launchdarkly_integration(sentry_init, use_global_client): client.variation("world", Context.create("user1", "user"), False) client.variation("other", Context.create("user2", "user"), False) - assert sentry_sdk.get_current_scope().flags.get() == [ - {"flag": "hello", "result": True}, - {"flag": "world", "result": True}, - {"flag": "other", "result": False}, - ] + events = capture_events() + sentry_sdk.capture_exception(Exception("something wrong!")) + assert len(events) == 1 + assert events[0]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + {"flag": "world", "result": True}, + {"flag": "other", "result": False}, + ] + } -def test_launchdarkly_integration_threaded(sentry_init): + +def test_launchdarkly_integration_threaded( + sentry_init, capture_events, uninstall_integration +): td = TestData.data_source() client = LDClient(config=Config("sdk-key", update_processor_class=td)) - sentry_init(integrations=[LaunchDarklyIntegration(ld_client=client)]) context = Context.create("user1") + uninstall_integration(LaunchDarklyIntegration.identifier) + sentry_init(integrations=[LaunchDarklyIntegration(ld_client=client)]) + events = capture_events() + def task(flag_key): # Creates a new isolation scope for the thread. # This means the evaluations in each task are captured separately. with sentry_sdk.isolation_scope(): client.variation(flag_key, context, False) - return [f["flag"] for f in sentry_sdk.get_current_scope().flags.get()] + # use a tag to identify to identify events later on + sentry_sdk.set_tag("task_id", flag_key) + sentry_sdk.capture_exception(Exception("something wrong!")) td.update(td.flag("hello").variation_for_all(True)) td.update(td.flag("world").variation_for_all(False)) @@ -65,34 +81,91 @@ def task(flag_key): client.variation("hello", context, False) with cf.ThreadPoolExecutor(max_workers=2) as pool: - results = list(pool.map(task, ["world", "other"])) - - assert results[0] == ["hello", "world"] - assert results[1] == ["hello", "other"] + pool.map(task, ["world", "other"]) + + # Capture error in original scope + sentry_sdk.set_tag("task_id", "0") + sentry_sdk.capture_exception(Exception("something wrong!")) + + assert len(events) == 3 + events.sort(key=lambda e: e["tags"]["task_id"]) + + assert events[0]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + ] + } + assert events[1]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + {"flag": "other", "result": False}, + ] + } + assert events[2]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + {"flag": "world", "result": False}, + ] + } + + +@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires python3.7 or higher") +def test_launchdarkly_integration_asyncio( + sentry_init, capture_events, uninstall_integration +): + """Assert concurrently evaluated flags do not pollute one another.""" + asyncio = pytest.importorskip("asyncio") -def test_launchdarkly_integration_asyncio(sentry_init): - """Assert concurrently evaluated flags do not pollute one another.""" td = TestData.data_source() client = LDClient(config=Config("sdk-key", update_processor_class=td)) - sentry_init(integrations=[LaunchDarklyIntegration(ld_client=client)]) context = Context.create("user1") + uninstall_integration(LaunchDarklyIntegration.identifier) + 
sentry_init(integrations=[LaunchDarklyIntegration(ld_client=client)]) + events = capture_events() + async def task(flag_key): with sentry_sdk.isolation_scope(): client.variation(flag_key, context, False) - return [f["flag"] for f in sentry_sdk.get_current_scope().flags.get()] + # use a tag to identify to identify events later on + sentry_sdk.set_tag("task_id", flag_key) + sentry_sdk.capture_exception(Exception("something wrong!")) async def runner(): return asyncio.gather(task("world"), task("other")) td.update(td.flag("hello").variation_for_all(True)) td.update(td.flag("world").variation_for_all(False)) + # Capture an eval before we split isolation scopes. client.variation("hello", context, False) - results = asyncio.run(runner()).result() - assert results[0] == ["hello", "world"] - assert results[1] == ["hello", "other"] + asyncio.run(runner()) + + # Capture error in original scope + sentry_sdk.set_tag("task_id", "0") + sentry_sdk.capture_exception(Exception("something wrong!")) + + assert len(events) == 3 + events.sort(key=lambda e: e["tags"]["task_id"]) + + assert events[0]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + ] + } + assert events[1]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + {"flag": "other", "result": False}, + ] + } + assert events[2]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + {"flag": "world", "result": False}, + ] + } def test_launchdarkly_integration_did_not_enable(monkeypatch): diff --git a/tests/integrations/openfeature/test_openfeature.py b/tests/integrations/openfeature/test_openfeature.py index 24e7857f9a..c180211c3f 100644 --- a/tests/integrations/openfeature/test_openfeature.py +++ b/tests/integrations/openfeature/test_openfeature.py @@ -1,13 +1,17 @@ -import asyncio import concurrent.futures as cf -import sentry_sdk +import sys + +import pytest from openfeature import api from openfeature.provider.in_memory_provider import InMemoryFlag, InMemoryProvider + +import sentry_sdk from sentry_sdk.integrations.openfeature import OpenFeatureIntegration -def test_openfeature_integration(sentry_init): +def test_openfeature_integration(sentry_init, capture_events, uninstall_integration): + uninstall_integration(OpenFeatureIntegration.identifier) sentry_init(integrations=[OpenFeatureIntegration()]) flags = { @@ -21,15 +25,25 @@ def test_openfeature_integration(sentry_init): client.get_boolean_value("world", default_value=False) client.get_boolean_value("other", default_value=True) - assert sentry_sdk.get_current_scope().flags.get() == [ - {"flag": "hello", "result": True}, - {"flag": "world", "result": False}, - {"flag": "other", "result": True}, - ] + events = capture_events() + sentry_sdk.capture_exception(Exception("something wrong!")) + + assert len(events) == 1 + assert events[0]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + {"flag": "world", "result": False}, + {"flag": "other", "result": True}, + ] + } -def test_openfeature_integration_threaded(sentry_init): +def test_openfeature_integration_threaded( + sentry_init, capture_events, uninstall_integration +): + uninstall_integration(OpenFeatureIntegration.identifier) sentry_init(integrations=[OpenFeatureIntegration()]) + events = capture_events() flags = { "hello": InMemoryFlag("on", {"on": True, "off": False}), @@ -37,6 +51,7 @@ def test_openfeature_integration_threaded(sentry_init): } api.set_provider(InMemoryProvider(flags)) + # Capture an eval before we split isolation scopes. 
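+    # This evaluation is stored on the parent scope, so it appears in every task's event below.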
client = api.get_client() client.get_boolean_value("hello", default_value=False) @@ -44,37 +59,95 @@ def task(flag): # Create a new isolation scope for the thread. This means the flags with sentry_sdk.isolation_scope(): client.get_boolean_value(flag, default_value=False) - return [f["flag"] for f in sentry_sdk.get_current_scope().flags.get()] + # use a tag to identify to identify events later on + sentry_sdk.set_tag("task_id", flag) + sentry_sdk.capture_exception(Exception("something wrong!")) + # Run tasks in separate threads with cf.ThreadPoolExecutor(max_workers=2) as pool: - results = list(pool.map(task, ["world", "other"])) + pool.map(task, ["world", "other"]) - assert results[0] == ["hello", "world"] - assert results[1] == ["hello", "other"] + # Capture error in original scope + sentry_sdk.set_tag("task_id", "0") + sentry_sdk.capture_exception(Exception("something wrong!")) + + assert len(events) == 3 + events.sort(key=lambda e: e["tags"]["task_id"]) + + assert events[0]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + ] + } + assert events[1]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + {"flag": "other", "result": False}, + ] + } + assert events[2]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + {"flag": "world", "result": False}, + ] + } -def test_openfeature_integration_asyncio(sentry_init): +@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires python3.7 or higher") +def test_openfeature_integration_asyncio( + sentry_init, capture_events, uninstall_integration +): """Assert concurrently evaluated flags do not pollute one another.""" + asyncio = pytest.importorskip("asyncio") + + uninstall_integration(OpenFeatureIntegration.identifier) + sentry_init(integrations=[OpenFeatureIntegration()]) + events = capture_events() + async def task(flag): with sentry_sdk.isolation_scope(): client.get_boolean_value(flag, default_value=False) - return [f["flag"] for f in sentry_sdk.get_current_scope().flags.get()] + # use a tag to identify to identify events later on + sentry_sdk.set_tag("task_id", flag) + sentry_sdk.capture_exception(Exception("something wrong!")) async def runner(): return asyncio.gather(task("world"), task("other")) - sentry_init(integrations=[OpenFeatureIntegration()]) - flags = { "hello": InMemoryFlag("on", {"on": True, "off": False}), "world": InMemoryFlag("off", {"on": True, "off": False}), } api.set_provider(InMemoryProvider(flags)) + # Capture an eval before we split isolation scopes. 
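+    # As in the threaded test, this shared eval shows up in all three captured events.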
client = api.get_client() client.get_boolean_value("hello", default_value=False) - results = asyncio.run(runner()).result() - assert results[0] == ["hello", "world"] - assert results[1] == ["hello", "other"] + asyncio.run(runner()) + + # Capture error in original scope + sentry_sdk.set_tag("task_id", "0") + sentry_sdk.capture_exception(Exception("something wrong!")) + + assert len(events) == 3 + events.sort(key=lambda e: e["tags"]["task_id"]) + + assert events[0]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + ] + } + assert events[1]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + {"flag": "other", "result": False}, + ] + } + assert events[2]["contexts"]["flags"] == { + "values": [ + {"flag": "hello", "result": True}, + {"flag": "world", "result": False}, + ] + } From fe4b88b8505376ace7c6f8750f83fd2af383190f Mon Sep 17 00:00:00 2001 From: Anton Pirker Date: Thu, 19 Dec 2024 14:00:09 +0100 Subject: [PATCH 2/7] Add github workflow to comment on issues when a fix was released (#3866) --- .github/workflows/release-comment-issues.yml | 31 ++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 .github/workflows/release-comment-issues.yml diff --git a/.github/workflows/release-comment-issues.yml b/.github/workflows/release-comment-issues.yml new file mode 100644 index 0000000000..d31c61dced --- /dev/null +++ b/.github/workflows/release-comment-issues.yml @@ -0,0 +1,31 @@ +name: "Automation: Notify issues for release" +on: + release: + types: + - published + workflow_dispatch: + inputs: + version: + description: Which version to notify issues for + required: false + +# This workflow is triggered when a release is published +jobs: + release-comment-issues: + runs-on: ubuntu-20.04 + name: Notify issues + steps: + - name: Get version + id: get_version + run: echo "version=${{ github.event.inputs.version || github.event.release.tag_name }}" >> $GITHUB_OUTPUT + + - name: Comment on linked issues that are mentioned in release + if: | + steps.get_version.outputs.version != '' + && !contains(steps.get_version.outputs.version, 'a') + && !contains(steps.get_version.outputs.version, 'b') + && !contains(steps.get_version.outputs.version, 'rc') + uses: getsentry/release-comment-issues-gh-action@v1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ steps.get_version.outputs.version }} \ No newline at end of file From 54aede36f9d3942c1069b47b20b88f01cb461fb5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 15:34:49 +0100 Subject: [PATCH 3/7] build(deps): bump codecov/codecov-action from 5.0.7 to 5.1.1 (#3867) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 5.0.7 to 5.1.1. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v5.0.7...v5.1.1) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Anton Pirker --- .github/workflows/test-integrations-ai.yml | 4 ++-- .github/workflows/test-integrations-aws.yml | 2 +- .github/workflows/test-integrations-cloud.yml | 4 ++-- .github/workflows/test-integrations-common.yml | 2 +- .github/workflows/test-integrations-dbs.yml | 4 ++-- .github/workflows/test-integrations-graphql.yml | 4 ++-- .github/workflows/test-integrations-misc.yml | 4 ++-- .github/workflows/test-integrations-network.yml | 4 ++-- .github/workflows/test-integrations-tasks.yml | 4 ++-- .github/workflows/test-integrations-web-1.yml | 4 ++-- .github/workflows/test-integrations-web-2.yml | 4 ++-- scripts/split-tox-gh-actions/templates/test_group.jinja | 2 +- 12 files changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/workflows/test-integrations-ai.yml b/.github/workflows/test-integrations-ai.yml index 5d1b05add8..8be64736c1 100644 --- a/.github/workflows/test-integrations-ai.yml +++ b/.github/workflows/test-integrations-ai.yml @@ -78,7 +78,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -150,7 +150,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-aws.yml b/.github/workflows/test-integrations-aws.yml index d2ce22f326..6eed3a3ab1 100644 --- a/.github/workflows/test-integrations-aws.yml +++ b/.github/workflows/test-integrations-aws.yml @@ -97,7 +97,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-cloud.yml b/.github/workflows/test-integrations-cloud.yml index 8fdd4a0649..677385e405 100644 --- a/.github/workflows/test-integrations-cloud.yml +++ b/.github/workflows/test-integrations-cloud.yml @@ -74,7 +74,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -142,7 +142,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-common.yml b/.github/workflows/test-integrations-common.yml index 8294b9480e..9c476553f5 100644 --- a/.github/workflows/test-integrations-common.yml +++ b/.github/workflows/test-integrations-common.yml @@ -62,7 +62,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-dbs.yml b/.github/workflows/test-integrations-dbs.yml index 0d9a7bbd7d..cbaa2c32d2 100644 --- a/.github/workflows/test-integrations-dbs.yml +++ b/.github/workflows/test-integrations-dbs.yml @@ -101,7 +101,7 @@ jobs: coverage xml - name: Upload coverage to Codecov 
if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -196,7 +196,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-graphql.yml b/.github/workflows/test-integrations-graphql.yml index 30480efe2e..d582717fff 100644 --- a/.github/workflows/test-integrations-graphql.yml +++ b/.github/workflows/test-integrations-graphql.yml @@ -74,7 +74,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -142,7 +142,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-misc.yml b/.github/workflows/test-integrations-misc.yml index b88b256384..00b1286362 100644 --- a/.github/workflows/test-integrations-misc.yml +++ b/.github/workflows/test-integrations-misc.yml @@ -90,7 +90,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -174,7 +174,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-network.yml b/.github/workflows/test-integrations-network.yml index 0a51866164..8f6bd9fd61 100644 --- a/.github/workflows/test-integrations-network.yml +++ b/.github/workflows/test-integrations-network.yml @@ -74,7 +74,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -142,7 +142,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-tasks.yml b/.github/workflows/test-integrations-tasks.yml index 695c338721..74c868d9b9 100644 --- a/.github/workflows/test-integrations-tasks.yml +++ b/.github/workflows/test-integrations-tasks.yml @@ -92,7 +92,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -178,7 +178,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-web-1.yml b/.github/workflows/test-integrations-web-1.yml index 6e172182b3..5be067a36b 100644 --- a/.github/workflows/test-integrations-web-1.yml +++ b/.github/workflows/test-integrations-web-1.yml @@ -92,7 +92,7 @@ jobs: 
coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -178,7 +178,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-web-2.yml b/.github/workflows/test-integrations-web-2.yml index f9f2651cb8..7ce0399a13 100644 --- a/.github/workflows/test-integrations-web-2.yml +++ b/.github/workflows/test-integrations-web-2.yml @@ -98,7 +98,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -190,7 +190,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/scripts/split-tox-gh-actions/templates/test_group.jinja b/scripts/split-tox-gh-actions/templates/test_group.jinja index 522be6dc5c..7225bbbfe5 100644 --- a/scripts/split-tox-gh-actions/templates/test_group.jinja +++ b/scripts/split-tox-gh-actions/templates/test_group.jinja @@ -92,7 +92,7 @@ - name: Upload coverage to Codecov if: {% raw %}${{ !cancelled() }}{% endraw %} - uses: codecov/codecov-action@v5.0.7 + uses: codecov/codecov-action@v5.1.1 with: token: {% raw %}${{ secrets.CODECOV_TOKEN }}{% endraw %} files: coverage.xml From 6e4cc36fbb66a09f4272176fc8972368e1028ae8 Mon Sep 17 00:00:00 2001 From: seyoon-lim Date: Fri, 20 Dec 2024 16:43:19 +0900 Subject: [PATCH 4/7] Support SparkIntegration activation after SparkContext created (#3411) --------- Co-authored-by: Anton Pirker --- sentry_sdk/integrations/spark/spark_driver.py | 121 +++++++---- tests/integrations/asgi/test_asgi.py | 1 - tests/integrations/spark/test_spark.py | 202 ++++++++++-------- 3 files changed, 189 insertions(+), 135 deletions(-) diff --git a/sentry_sdk/integrations/spark/spark_driver.py b/sentry_sdk/integrations/spark/spark_driver.py index c6470f2302..a86f16344d 100644 --- a/sentry_sdk/integrations/spark/spark_driver.py +++ b/sentry_sdk/integrations/spark/spark_driver.py @@ -9,6 +9,7 @@ from typing import Optional from sentry_sdk._types import Event, Hint + from pyspark import SparkContext class SparkIntegration(Integration): @@ -17,7 +18,7 @@ class SparkIntegration(Integration): @staticmethod def setup_once(): # type: () -> None - patch_spark_context_init() + _setup_sentry_tracing() def _set_app_properties(): @@ -37,7 +38,7 @@ def _set_app_properties(): def _start_sentry_listener(sc): - # type: (Any) -> None + # type: (SparkContext) -> None """ Start java gateway server to add custom `SparkListener` """ @@ -49,7 +50,51 @@ def _start_sentry_listener(sc): sc._jsc.sc().addSparkListener(listener) -def patch_spark_context_init(): +def _add_event_processor(sc): + # type: (SparkContext) -> None + scope = sentry_sdk.get_isolation_scope() + + @scope.add_event_processor + def process_event(event, hint): + # type: (Event, Hint) -> Optional[Event] + with capture_internal_exceptions(): + if sentry_sdk.get_client().get_integration(SparkIntegration) is None: + return event + + if sc._active_spark_context is None: + return event + + event.setdefault("user", {}).setdefault("id", 
sc.sparkUser()) + + event.setdefault("tags", {}).setdefault( + "executor.id", sc._conf.get("spark.executor.id") + ) + event["tags"].setdefault( + "spark-submit.deployMode", + sc._conf.get("spark.submit.deployMode"), + ) + event["tags"].setdefault("driver.host", sc._conf.get("spark.driver.host")) + event["tags"].setdefault("driver.port", sc._conf.get("spark.driver.port")) + event["tags"].setdefault("spark_version", sc.version) + event["tags"].setdefault("app_name", sc.appName) + event["tags"].setdefault("application_id", sc.applicationId) + event["tags"].setdefault("master", sc.master) + event["tags"].setdefault("spark_home", sc.sparkHome) + + event.setdefault("extra", {}).setdefault("web_url", sc.uiWebUrl) + + return event + + +def _activate_integration(sc): + # type: (SparkContext) -> None + + _start_sentry_listener(sc) + _set_app_properties() + _add_event_processor(sc) + + +def _patch_spark_context_init(): # type: () -> None from pyspark import SparkContext @@ -59,51 +104,22 @@ def patch_spark_context_init(): def _sentry_patched_spark_context_init(self, *args, **kwargs): # type: (SparkContext, *Any, **Any) -> Optional[Any] rv = spark_context_init(self, *args, **kwargs) - _start_sentry_listener(self) - _set_app_properties() - - scope = sentry_sdk.get_isolation_scope() - - @scope.add_event_processor - def process_event(event, hint): - # type: (Event, Hint) -> Optional[Event] - with capture_internal_exceptions(): - if sentry_sdk.get_client().get_integration(SparkIntegration) is None: - return event - - if self._active_spark_context is None: - return event - - event.setdefault("user", {}).setdefault("id", self.sparkUser()) - - event.setdefault("tags", {}).setdefault( - "executor.id", self._conf.get("spark.executor.id") - ) - event["tags"].setdefault( - "spark-submit.deployMode", - self._conf.get("spark.submit.deployMode"), - ) - event["tags"].setdefault( - "driver.host", self._conf.get("spark.driver.host") - ) - event["tags"].setdefault( - "driver.port", self._conf.get("spark.driver.port") - ) - event["tags"].setdefault("spark_version", self.version) - event["tags"].setdefault("app_name", self.appName) - event["tags"].setdefault("application_id", self.applicationId) - event["tags"].setdefault("master", self.master) - event["tags"].setdefault("spark_home", self.sparkHome) - - event.setdefault("extra", {}).setdefault("web_url", self.uiWebUrl) - - return event - + _activate_integration(self) return rv SparkContext._do_init = _sentry_patched_spark_context_init +def _setup_sentry_tracing(): + # type: () -> None + from pyspark import SparkContext + + if SparkContext._active_spark_context is not None: + _activate_integration(SparkContext._active_spark_context) + return + _patch_spark_context_init() + + class SparkListener: def onApplicationEnd(self, applicationEnd): # noqa: N802,N803 # type: (Any) -> None @@ -208,10 +224,21 @@ class Java: class SentryListener(SparkListener): + def _add_breadcrumb( + self, + level, # type: str + message, # type: str + data=None, # type: Optional[dict[str, Any]] + ): + # type: (...) 
-> None + sentry_sdk.get_global_scope().add_breadcrumb( + level=level, message=message, data=data + ) + def onJobStart(self, jobStart): # noqa: N802,N803 # type: (Any) -> None message = "Job {} Started".format(jobStart.jobId()) - sentry_sdk.add_breadcrumb(level="info", message=message) + self._add_breadcrumb(level="info", message=message) _set_app_properties() def onJobEnd(self, jobEnd): # noqa: N802,N803 @@ -227,14 +254,14 @@ def onJobEnd(self, jobEnd): # noqa: N802,N803 level = "warning" message = "Job {} Failed".format(jobEnd.jobId()) - sentry_sdk.add_breadcrumb(level=level, message=message, data=data) + self._add_breadcrumb(level=level, message=message, data=data) def onStageSubmitted(self, stageSubmitted): # noqa: N802,N803 # type: (Any) -> None stage_info = stageSubmitted.stageInfo() message = "Stage {} Submitted".format(stage_info.stageId()) data = {"attemptId": stage_info.attemptId(), "name": stage_info.name()} - sentry_sdk.add_breadcrumb(level="info", message=message, data=data) + self._add_breadcrumb(level="info", message=message, data=data) _set_app_properties() def onStageCompleted(self, stageCompleted): # noqa: N802,N803 @@ -255,4 +282,4 @@ def onStageCompleted(self, stageCompleted): # noqa: N802,N803 message = "Stage {} Completed".format(stage_info.stageId()) level = "info" - sentry_sdk.add_breadcrumb(level=level, message=message, data=data) + self._add_breadcrumb(level=level, message=message, data=data) diff --git a/tests/integrations/asgi/test_asgi.py b/tests/integrations/asgi/test_asgi.py index e0a3900a38..f3bc7147bf 100644 --- a/tests/integrations/asgi/test_asgi.py +++ b/tests/integrations/asgi/test_asgi.py @@ -128,7 +128,6 @@ async def app(scope, receive, send): @pytest.fixture def asgi3_custom_transaction_app(): - async def app(scope, receive, send): sentry_sdk.get_current_scope().set_transaction_name("foobar", source="custom") await send( diff --git a/tests/integrations/spark/test_spark.py b/tests/integrations/spark/test_spark.py index 58c8862ee2..44ba9f8728 100644 --- a/tests/integrations/spark/test_spark.py +++ b/tests/integrations/spark/test_spark.py @@ -1,6 +1,7 @@ import pytest import sys from unittest.mock import patch + from sentry_sdk.integrations.spark.spark_driver import ( _set_app_properties, _start_sentry_listener, @@ -18,8 +19,22 @@ ################ -def test_set_app_properties(): - spark_context = SparkContext(appName="Testing123") +@pytest.fixture(scope="function") +def sentry_init_with_reset(sentry_init): + from sentry_sdk.integrations import _processed_integrations + + yield lambda: sentry_init(integrations=[SparkIntegration()]) + _processed_integrations.remove("spark") + + +@pytest.fixture(scope="function") +def create_spark_context(): + yield lambda: SparkContext(appName="Testing123") + SparkContext._active_spark_context.stop() + + +def test_set_app_properties(create_spark_context): + spark_context = create_spark_context() _set_app_properties() assert spark_context.getLocalProperty("sentry_app_name") == "Testing123" @@ -30,9 +45,8 @@ def test_set_app_properties(): ) -def test_start_sentry_listener(): - spark_context = SparkContext.getOrCreate() - +def test_start_sentry_listener(create_spark_context): + spark_context = create_spark_context() gateway = spark_context._gateway assert gateway._callback_server is None @@ -41,9 +55,28 @@ def test_start_sentry_listener(): assert gateway._callback_server is not None -def test_initialize_spark_integration(sentry_init): - sentry_init(integrations=[SparkIntegration()]) - SparkContext.getOrCreate() 
+@patch("sentry_sdk.integrations.spark.spark_driver._patch_spark_context_init") +def test_initialize_spark_integration_before_spark_context_init( + mock_patch_spark_context_init, + sentry_init_with_reset, + create_spark_context, +): + sentry_init_with_reset() + create_spark_context() + + mock_patch_spark_context_init.assert_called_once() + + +@patch("sentry_sdk.integrations.spark.spark_driver._activate_integration") +def test_initialize_spark_integration_after_spark_context_init( + mock_activate_integration, + create_spark_context, + sentry_init_with_reset, +): + create_spark_context() + sentry_init_with_reset() + + mock_activate_integration.assert_called_once() @pytest.fixture @@ -54,88 +87,83 @@ def sentry_listener(): return listener -@pytest.fixture -def mock_add_breadcrumb(): - with patch("sentry_sdk.add_breadcrumb") as mock: - yield mock - - -def test_sentry_listener_on_job_start(sentry_listener, mock_add_breadcrumb): +def test_sentry_listener_on_job_start(sentry_listener): listener = sentry_listener + with patch.object(listener, "_add_breadcrumb") as mock_add_breadcrumb: - class MockJobStart: - def jobId(self): # noqa: N802 - return "sample-job-id-start" + class MockJobStart: + def jobId(self): # noqa: N802 + return "sample-job-id-start" - mock_job_start = MockJobStart() - listener.onJobStart(mock_job_start) + mock_job_start = MockJobStart() + listener.onJobStart(mock_job_start) - mock_add_breadcrumb.assert_called_once() - mock_hub = mock_add_breadcrumb.call_args + mock_add_breadcrumb.assert_called_once() + mock_hub = mock_add_breadcrumb.call_args - assert mock_hub.kwargs["level"] == "info" - assert "sample-job-id-start" in mock_hub.kwargs["message"] + assert mock_hub.kwargs["level"] == "info" + assert "sample-job-id-start" in mock_hub.kwargs["message"] @pytest.mark.parametrize( "job_result, level", [("JobSucceeded", "info"), ("JobFailed", "warning")] ) -def test_sentry_listener_on_job_end( - sentry_listener, mock_add_breadcrumb, job_result, level -): +def test_sentry_listener_on_job_end(sentry_listener, job_result, level): listener = sentry_listener + with patch.object(listener, "_add_breadcrumb") as mock_add_breadcrumb: - class MockJobResult: - def toString(self): # noqa: N802 - return job_result + class MockJobResult: + def toString(self): # noqa: N802 + return job_result - class MockJobEnd: - def jobId(self): # noqa: N802 - return "sample-job-id-end" + class MockJobEnd: + def jobId(self): # noqa: N802 + return "sample-job-id-end" - def jobResult(self): # noqa: N802 - result = MockJobResult() - return result + def jobResult(self): # noqa: N802 + result = MockJobResult() + return result - mock_job_end = MockJobEnd() - listener.onJobEnd(mock_job_end) + mock_job_end = MockJobEnd() + listener.onJobEnd(mock_job_end) - mock_add_breadcrumb.assert_called_once() - mock_hub = mock_add_breadcrumb.call_args + mock_add_breadcrumb.assert_called_once() + mock_hub = mock_add_breadcrumb.call_args - assert mock_hub.kwargs["level"] == level - assert mock_hub.kwargs["data"]["result"] == job_result - assert "sample-job-id-end" in mock_hub.kwargs["message"] + assert mock_hub.kwargs["level"] == level + assert mock_hub.kwargs["data"]["result"] == job_result + assert "sample-job-id-end" in mock_hub.kwargs["message"] -def test_sentry_listener_on_stage_submitted(sentry_listener, mock_add_breadcrumb): +def test_sentry_listener_on_stage_submitted(sentry_listener): listener = sentry_listener + with patch.object(listener, "_add_breadcrumb") as mock_add_breadcrumb: - class StageInfo: - def stageId(self): # noqa: 
N802 - return "sample-stage-id-submit" + class StageInfo: + def stageId(self): # noqa: N802 + return "sample-stage-id-submit" - def name(self): - return "run-job" + def name(self): + return "run-job" - def attemptId(self): # noqa: N802 - return 14 + def attemptId(self): # noqa: N802 + return 14 - class MockStageSubmitted: - def stageInfo(self): # noqa: N802 - stageinf = StageInfo() - return stageinf + class MockStageSubmitted: + def stageInfo(self): # noqa: N802 + stageinf = StageInfo() + return stageinf - mock_stage_submitted = MockStageSubmitted() - listener.onStageSubmitted(mock_stage_submitted) + mock_stage_submitted = MockStageSubmitted() + listener.onStageSubmitted(mock_stage_submitted) - mock_add_breadcrumb.assert_called_once() - mock_hub = mock_add_breadcrumb.call_args + mock_add_breadcrumb.assert_called_once() + mock_hub = mock_add_breadcrumb.call_args - assert mock_hub.kwargs["level"] == "info" - assert "sample-stage-id-submit" in mock_hub.kwargs["message"] - assert mock_hub.kwargs["data"]["attemptId"] == 14 - assert mock_hub.kwargs["data"]["name"] == "run-job" + assert mock_hub.kwargs["level"] == "info" + assert "sample-stage-id-submit" in mock_hub.kwargs["message"] + assert mock_hub.kwargs["data"]["attemptId"] == 14 + assert mock_hub.kwargs["data"]["name"] == "run-job" @pytest.fixture @@ -175,39 +203,39 @@ def stageInfo(self): # noqa: N802 def test_sentry_listener_on_stage_completed_success( - sentry_listener, mock_add_breadcrumb, get_mock_stage_completed + sentry_listener, get_mock_stage_completed ): listener = sentry_listener + with patch.object(listener, "_add_breadcrumb") as mock_add_breadcrumb: + mock_stage_completed = get_mock_stage_completed(failure_reason=False) + listener.onStageCompleted(mock_stage_completed) - mock_stage_completed = get_mock_stage_completed(failure_reason=False) - listener.onStageCompleted(mock_stage_completed) - - mock_add_breadcrumb.assert_called_once() - mock_hub = mock_add_breadcrumb.call_args + mock_add_breadcrumb.assert_called_once() + mock_hub = mock_add_breadcrumb.call_args - assert mock_hub.kwargs["level"] == "info" - assert "sample-stage-id-submit" in mock_hub.kwargs["message"] - assert mock_hub.kwargs["data"]["attemptId"] == 14 - assert mock_hub.kwargs["data"]["name"] == "run-job" - assert "reason" not in mock_hub.kwargs["data"] + assert mock_hub.kwargs["level"] == "info" + assert "sample-stage-id-submit" in mock_hub.kwargs["message"] + assert mock_hub.kwargs["data"]["attemptId"] == 14 + assert mock_hub.kwargs["data"]["name"] == "run-job" + assert "reason" not in mock_hub.kwargs["data"] def test_sentry_listener_on_stage_completed_failure( - sentry_listener, mock_add_breadcrumb, get_mock_stage_completed + sentry_listener, get_mock_stage_completed ): listener = sentry_listener - - mock_stage_completed = get_mock_stage_completed(failure_reason=True) - listener.onStageCompleted(mock_stage_completed) - - mock_add_breadcrumb.assert_called_once() - mock_hub = mock_add_breadcrumb.call_args - - assert mock_hub.kwargs["level"] == "warning" - assert "sample-stage-id-submit" in mock_hub.kwargs["message"] - assert mock_hub.kwargs["data"]["attemptId"] == 14 - assert mock_hub.kwargs["data"]["name"] == "run-job" - assert mock_hub.kwargs["data"]["reason"] == "failure-reason" + with patch.object(listener, "_add_breadcrumb") as mock_add_breadcrumb: + mock_stage_completed = get_mock_stage_completed(failure_reason=True) + listener.onStageCompleted(mock_stage_completed) + + mock_add_breadcrumb.assert_called_once() + mock_hub = mock_add_breadcrumb.call_args + + 
assert mock_hub.kwargs["level"] == "warning" + assert "sample-stage-id-submit" in mock_hub.kwargs["message"] + assert mock_hub.kwargs["data"]["attemptId"] == 14 + assert mock_hub.kwargs["data"]["name"] == "run-job" + assert mock_hub.kwargs["data"]["reason"] == "failure-reason" ################ From 8ced6609e6fcc95855f43cf9fc1d94b59836b57f Mon Sep 17 00:00:00 2001 From: Ivana Kellyer Date: Fri, 20 Dec 2024 10:15:48 +0100 Subject: [PATCH 5/7] Rename scripts (#3885) --- .github/workflows/ci.yml | 4 ++-- .github/workflows/test-integrations-ai.yml | 6 ++++-- .github/workflows/test-integrations-aws.yml | 6 ++++-- .github/workflows/test-integrations-cloud.yml | 6 ++++-- .github/workflows/test-integrations-common.yml | 6 ++++-- .github/workflows/test-integrations-dbs.yml | 6 ++++-- .github/workflows/test-integrations-graphql.yml | 6 ++++-- .github/workflows/test-integrations-misc.yml | 6 ++++-- .github/workflows/test-integrations-network.yml | 6 ++++-- .github/workflows/test-integrations-tasks.yml | 6 ++++-- .github/workflows/test-integrations-web-1.yml | 6 ++++-- .github/workflows/test-integrations-web-2.yml | 6 ++++-- ...er-versions.sh => aws-delete-lambda-layer-versions.sh} | 0 scripts/split_tox_gh_actions/__init__.py | 0 scripts/split_tox_gh_actions/requirements.txt | 1 + .../split_tox_gh_actions.py} | 8 ++++---- .../templates/base.jinja | 6 ++++-- .../templates/check_permissions.jinja | 0 .../templates/check_required.jinja | 0 .../templates/test_group.jinja | 0 20 files changed, 55 insertions(+), 30 deletions(-) rename scripts/{aws-delete-lamba-layer-versions.sh => aws-delete-lambda-layer-versions.sh} (100%) create mode 100644 scripts/split_tox_gh_actions/__init__.py create mode 100644 scripts/split_tox_gh_actions/requirements.txt rename scripts/{split-tox-gh-actions/split-tox-gh-actions.py => split_tox_gh_actions/split_tox_gh_actions.py} (96%) rename scripts/{split-tox-gh-actions => split_tox_gh_actions}/templates/base.jinja (87%) rename scripts/{split-tox-gh-actions => split_tox_gh_actions}/templates/check_permissions.jinja (100%) rename scripts/{split-tox-gh-actions => split_tox_gh_actions}/templates/check_required.jinja (100%) rename scripts/{split-tox-gh-actions => split_tox_gh_actions}/templates/test_group.jinja (100%) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ed035b4ab0..7ef6604e39 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,8 +45,8 @@ jobs: python-version: 3.12 - run: | - pip install jinja2 - python scripts/split-tox-gh-actions/split-tox-gh-actions.py --fail-on-changes + pip install -r scripts/split_tox_gh_actions/requirements.txt + python scripts/split_tox_gh_actions/split_tox_gh_actions.py --fail-on-changes build_lambda_layer: name: Build Package diff --git a/.github/workflows/test-integrations-ai.yml b/.github/workflows/test-integrations-ai.yml index 8be64736c1..c5e1f6b87e 100644 --- a/.github/workflows/test-integrations-ai.yml +++ b/.github/workflows/test-integrations-ai.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. 
This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test AI on: push: diff --git a/.github/workflows/test-integrations-aws.yml b/.github/workflows/test-integrations-aws.yml index 6eed3a3ab1..54610f1abd 100644 --- a/.github/workflows/test-integrations-aws.yml +++ b/.github/workflows/test-integrations-aws.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test AWS on: push: diff --git a/.github/workflows/test-integrations-cloud.yml b/.github/workflows/test-integrations-cloud.yml index 677385e405..f72fec9f9f 100644 --- a/.github/workflows/test-integrations-cloud.yml +++ b/.github/workflows/test-integrations-cloud.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test Cloud on: push: diff --git a/.github/workflows/test-integrations-common.yml b/.github/workflows/test-integrations-common.yml index 9c476553f5..0837c60c30 100644 --- a/.github/workflows/test-integrations-common.yml +++ b/.github/workflows/test-integrations-common.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test Common on: push: diff --git a/.github/workflows/test-integrations-dbs.yml b/.github/workflows/test-integrations-dbs.yml index cbaa2c32d2..a4aefa6a51 100644 --- a/.github/workflows/test-integrations-dbs.yml +++ b/.github/workflows/test-integrations-dbs.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test DBs on: push: diff --git a/.github/workflows/test-integrations-graphql.yml b/.github/workflows/test-integrations-graphql.yml index d582717fff..ab7e81dcd6 100644 --- a/.github/workflows/test-integrations-graphql.yml +++ b/.github/workflows/test-integrations-graphql.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. 
This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test GraphQL on: push: diff --git a/.github/workflows/test-integrations-misc.yml b/.github/workflows/test-integrations-misc.yml index 00b1286362..1a4e910383 100644 --- a/.github/workflows/test-integrations-misc.yml +++ b/.github/workflows/test-integrations-misc.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test Misc on: push: diff --git a/.github/workflows/test-integrations-network.yml b/.github/workflows/test-integrations-network.yml index 8f6bd9fd61..f41fd86b29 100644 --- a/.github/workflows/test-integrations-network.yml +++ b/.github/workflows/test-integrations-network.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test Network on: push: diff --git a/.github/workflows/test-integrations-tasks.yml b/.github/workflows/test-integrations-tasks.yml index 74c868d9b9..9910b75568 100644 --- a/.github/workflows/test-integrations-tasks.yml +++ b/.github/workflows/test-integrations-tasks.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test Tasks on: push: diff --git a/.github/workflows/test-integrations-web-1.yml b/.github/workflows/test-integrations-web-1.yml index 5be067a36b..fb7a9247d5 100644 --- a/.github/workflows/test-integrations-web-1.yml +++ b/.github/workflows/test-integrations-web-1.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test Web 1 on: push: diff --git a/.github/workflows/test-integrations-web-2.yml b/.github/workflows/test-integrations-web-2.yml index 7ce0399a13..1910d5999e 100644 --- a/.github/workflows/test-integrations-web-2.yml +++ b/.github/workflows/test-integrations-web-2.yml @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. 
This file is generated automatically by executing +# python scripts/split_tox_gh_actions/split_tox_gh_actions.py +# The template responsible for it is in +# scripts/split_tox_gh_actions/templates/base.jinja name: Test Web 2 on: push: diff --git a/scripts/aws-delete-lamba-layer-versions.sh b/scripts/aws-delete-lambda-layer-versions.sh similarity index 100% rename from scripts/aws-delete-lamba-layer-versions.sh rename to scripts/aws-delete-lambda-layer-versions.sh diff --git a/scripts/split_tox_gh_actions/__init__.py b/scripts/split_tox_gh_actions/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/split_tox_gh_actions/requirements.txt b/scripts/split_tox_gh_actions/requirements.txt new file mode 100644 index 0000000000..7f7afbf3bf --- /dev/null +++ b/scripts/split_tox_gh_actions/requirements.txt @@ -0,0 +1 @@ +jinja2 diff --git a/scripts/split-tox-gh-actions/split-tox-gh-actions.py b/scripts/split_tox_gh_actions/split_tox_gh_actions.py similarity index 96% rename from scripts/split-tox-gh-actions/split-tox-gh-actions.py rename to scripts/split_tox_gh_actions/split_tox_gh_actions.py index 26d13390c2..1b53093c5e 100755 --- a/scripts/split-tox-gh-actions/split-tox-gh-actions.py +++ b/scripts/split_tox_gh_actions/split_tox_gh_actions.py @@ -8,7 +8,7 @@ Whenever tox.ini is changed, this script needs to be run. Usage: - python split-tox-gh-actions.py [--fail-on-changes] + python split_tox_gh_actions.py [--fail-on-changes] If the parameter `--fail-on-changes` is set, the script will raise a RuntimeError in case the yaml files have been changed by the scripts execution. This is used in CI to check if the yaml files @@ -158,7 +158,7 @@ def main(fail_on_changes): if missing_frameworks: raise RuntimeError( "Please add the following frameworks to the corresponding group " - "in `GROUPS` in `scripts/split-tox-gh-actions/split-tox-gh-actions.py: " + "in `GROUPS` in `scripts/split_tox_gh_actions/split_tox_gh_actions.py: " + ", ".join(missing_frameworks) ) @@ -176,9 +176,9 @@ def main(fail_on_changes): if old_hash != new_hash: raise RuntimeError( "The yaml configuration files have changed. This means that either `tox.ini` " - "or one of the constants in `split-tox-gh-actions.py` has changed " + "or one of the constants in `split_tox_gh_actions.py` has changed " "but the changes have not been propagated to the GitHub actions config files. " - "Please run `python scripts/split-tox-gh-actions/split-tox-gh-actions.py` " + "Please run `python scripts/split_tox_gh_actions/split_tox_gh_actions.py` " "locally and commit the changes of the yaml configuration files to continue. " ) diff --git a/scripts/split-tox-gh-actions/templates/base.jinja b/scripts/split_tox_gh_actions/templates/base.jinja similarity index 87% rename from scripts/split-tox-gh-actions/templates/base.jinja rename to scripts/split_tox_gh_actions/templates/base.jinja index 23f051de42..16dbc04a76 100644 --- a/scripts/split-tox-gh-actions/templates/base.jinja +++ b/scripts/split_tox_gh_actions/templates/base.jinja @@ -1,5 +1,7 @@ -# Do not edit this file. This file is generated automatically by executing -# python scripts/split-tox-gh-actions/split-tox-gh-actions.py +# Do not edit this YAML file. 
From f6281f557fe62c847a0aca95eb666129e893cf32 Mon Sep 17 00:00:00 2001
From: ffelixg <142172984+ffelixg@users.noreply.github.com>
Date: Fri, 20 Dec 2024 12:34:12 +0100
Subject: [PATCH 6/7] Fix lru cache copying (#3883)

A simpler and better LRU cache implementation that prevents data from
leaking between copied caches.

Fixes #3852

---------

Co-authored-by: Anton Pirker
---
 sentry_sdk/_lru_cache.py | 195 +++++++--------------------------------
 tests/test_lru_cache.py  |  37 +++++++-
 tests/test_scope.py      |  22 +++++
 3 files changed, 93 insertions(+), 161 deletions(-)

diff --git a/sentry_sdk/_lru_cache.py b/sentry_sdk/_lru_cache.py
index 825c773529..09eae27df2 100644
--- a/sentry_sdk/_lru_cache.py
+++ b/sentry_sdk/_lru_cache.py
@@ -1,181 +1,56 @@
-"""
-A fork of Python 3.6's stdlib lru_cache (found in Python's 'cpython/Lib/functools.py')
-adapted into a data structure for single threaded uses.
+from typing import TYPE_CHECKING
 
-https://github.com/python/cpython/blob/v3.6.12/Lib/functools.py
+if TYPE_CHECKING:
+    from typing import Any
 
-Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
-
-All Rights Reserved
-
-
-PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
---------------------------------------------
-
-1. This LICENSE AGREEMENT is between the Python Software Foundation
-("PSF"), and the Individual or Organization ("Licensee") accessing and
-otherwise using this software ("Python") in source or binary form and
-its associated documentation.
-
-2. Subject to the terms and conditions of this License Agreement, PSF hereby
-grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
-analyze, test, perform and/or display publicly, prepare derivative works,
-distribute, and otherwise use Python alone or in any derivative version,
-provided, however, that PSF's License Agreement and PSF's notice of copyright,
-i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
-All Rights Reserved" are retained in Python alone or in any derivative version
-prepared by Licensee.
-
-3. In the event Licensee prepares a derivative work that is based on
-or incorporates Python or any part thereof, and wants to make
-the derivative work available to others as provided herein, then
-Licensee hereby agrees to include in any such work a brief summary of
-the changes made to Python.
-
-4. PSF is making Python available to Licensee on an "AS IS"
-basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
-IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
-DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
-FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
-INFRINGE ANY THIRD PARTY RIGHTS.
-
-5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
-FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
-A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
-OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-
-6. This License Agreement will automatically terminate upon a material
-breach of its terms and conditions.
-
-7. Nothing in this License Agreement shall be deemed to create any
-relationship of agency, partnership, or joint venture between PSF and
-Licensee. This License Agreement does not grant permission to use PSF
-trademarks or trade name in a trademark sense to endorse or promote
-products or services of Licensee, or any third party.
-
-8. By copying, installing or otherwise using Python, Licensee
-agrees to be bound by the terms and conditions of this License
-Agreement.
-
-"""
-
-from copy import copy, deepcopy
-
-SENTINEL = object()
-
-
-# aliases to the entries in a node
-PREV = 0
-NEXT = 1
-KEY = 2
-VALUE = 3
+_SENTINEL = object()
 
 
 class LRUCache:
     def __init__(self, max_size):
-        assert max_size > 0
-
+        # type: (int) -> None
+        if max_size <= 0:
+            raise AssertionError(f"invalid max_size: {max_size}")
         self.max_size = max_size
-        self.full = False
-
-        self.cache = {}
-
-        # root of the circularly linked list to keep track of
-        # the least recently used key
-        self.root = []  # type: ignore
-        # the node looks like [PREV, NEXT, KEY, VALUE]
-        self.root[:] = [self.root, self.root, None, None]
-
+        self._data = {}  # type: dict[Any, Any]
         self.hits = self.misses = 0
+        self.full = False
 
     def __copy__(self):
-        cache = LRUCache(self.max_size)
-        cache.full = self.full
-        cache.cache = copy(self.cache)
-        cache.root = deepcopy(self.root)
-        return cache
+        # type: () -> LRUCache
+        new = LRUCache(max_size=self.max_size)
+        new.hits = self.hits
+        new.misses = self.misses
+        new.full = self.full
+        new._data = self._data.copy()
+        return new
 
     def set(self, key, value):
-        link = self.cache.get(key, SENTINEL)
-
-        if link is not SENTINEL:
-            # have to move the node to the front of the linked list
-            link_prev, link_next, _key, _value = link
-
-            # first remove the node from the lsnked list
-            link_prev[NEXT] = link_next
-            link_next[PREV] = link_prev
-
-            # insert the node between the root and the last
-            last = self.root[PREV]
-            last[NEXT] = self.root[PREV] = link
-            link[PREV] = last
-            link[NEXT] = self.root
-
-            # update the value
-            link[VALUE] = value
-
+        # type: (Any, Any) -> None
+        current = self._data.pop(key, _SENTINEL)
+        if current is not _SENTINEL:
+            self._data[key] = value
         elif self.full:
-            # reuse the root node, so update its key/value
-            old_root = self.root
-            old_root[KEY] = key
-            old_root[VALUE] = value
-
-            self.root = old_root[NEXT]
-            old_key = self.root[KEY]
-
-            self.root[KEY] = self.root[VALUE] = None
-
-            del self.cache[old_key]
-
-            self.cache[key] = old_root
-
+            self._data.pop(next(iter(self._data)))
+            self._data[key] = value
         else:
-            # insert new node after last
-            last = self.root[PREV]
-            link = [last, self.root, key, value]
-            last[NEXT] = self.root[PREV] = self.cache[key] = link
-            self.full = len(self.cache) >= self.max_size
+            self._data[key] = value
+            self.full = len(self._data) >= self.max_size
 
     def get(self, key, default=None):
-        link = self.cache.get(key, SENTINEL)
-
-        if link is SENTINEL:
+        # type: (Any, Any) -> Any
+        try:
+            ret = self._data.pop(key)
+        except KeyError:
             self.misses += 1
-            return default
-
-        # have to move the node to the front of the linked list
-        link_prev, link_next, _key, _value = link
-
-        # first remove the node from the lsnked list
-        link_prev[NEXT] = link_next
-        link_next[PREV] = link_prev
-
-        # insert the node between the root and the last
-        last = self.root[PREV]
-        last[NEXT] = self.root[PREV] = link
-        link[PREV] = last
-        link[NEXT] = self.root
-
-        self.hits += 1
+            ret = default
+        else:
+            self.hits += 1
+            self._data[key] = ret
 
-        return link[VALUE]
+        return ret
 
     def get_all(self):
-        nodes = []
-        node = self.root[NEXT]
-
-        # To ensure the loop always terminates we iterate to the maximum
-        # size of the LRU cache.
-        for _ in range(self.max_size):
-            # The cache may not be full. We exit early if we've wrapped
-            # around to the head.
-            if node is self.root:
-                break
-            nodes.append((node[KEY], node[VALUE]))
-            node = node[NEXT]
-
-        return nodes
+        # type: () -> list[tuple[Any, Any]]
+        return list(self._data.items())
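The rewrite leans on dict insertion order (guaranteed since Python 3.7): `set` and `get` pop the key and reinsert it, so the first key in `_data` is always the least recently used and eviction is just `pop(next(iter(...)))`. A quick behavioral sketch of the new class, as a usage illustration rather than part of the patch:

```
from copy import copy

from sentry_sdk._lru_cache import LRUCache

cache = LRUCache(max_size=2)
cache.set(1, "a")
cache.set(2, "b")  # cache is now full
cache.get(1)       # pop + reinsert marks key 1 as most recently used
cache.set(3, "c")  # evicts key 2, the least recently used entry

assert cache.get_all() == [(1, "a"), (3, "c")]

# __copy__ duplicates the underlying dict, so the clone is independent
clone = copy(cache)
clone.set(1, "z")
assert cache.get(1) == "a"
```
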
diff --git a/tests/test_lru_cache.py b/tests/test_lru_cache.py
index cab9bbc7eb..1a54ed83d3 100644
--- a/tests/test_lru_cache.py
+++ b/tests/test_lru_cache.py
@@ -1,5 +1,5 @@
 import pytest
-from copy import copy
+from copy import copy, deepcopy
 
 from sentry_sdk._lru_cache import LRUCache
 
@@ -76,3 +76,38 @@ def test_cache_copy():
     cache.get(1)
     assert copied.get_all() == [(1, 1), (2, 2), (3, 3)]
     assert cache.get_all() == [(2, 2), (3, 3), (1, 1)]
+
+
+def test_cache_deepcopy():
+    cache = LRUCache(3)
+    cache.set(0, 0)
+    cache.set(1, 1)
+
+    copied = deepcopy(cache)
+    cache.set(2, 2)
+    cache.set(3, 3)
+    assert copied.get_all() == [(0, 0), (1, 1)]
+    assert cache.get_all() == [(1, 1), (2, 2), (3, 3)]
+
+    copied = deepcopy(cache)
+    cache.get(1)
+    assert copied.get_all() == [(1, 1), (2, 2), (3, 3)]
+    assert cache.get_all() == [(2, 2), (3, 3), (1, 1)]
+
+
+def test_cache_pollution():
+    cache1 = LRUCache(max_size=2)
+    cache1.set(1, True)
+    cache2 = copy(cache1)
+    cache2.set(1, False)
+    assert cache1.get(1) is True
+    assert cache2.get(1) is False
+
+
+def test_cache_pollution_deepcopy():
+    cache1 = LRUCache(max_size=2)
+    cache1.set(1, True)
+    cache2 = deepcopy(cache1)
+    cache2.set(1, False)
+    assert cache1.get(1) is True
+    assert cache2.get(1) is False
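The two pollution tests pin down the bug from #3852: the old `__copy__` deep-copied the linked list but only shallow-copied the `cache` lookup dict, so the copy's dict still pointed at the original's mutable `[PREV, NEXT, KEY, VALUE]` nodes, and writing a value through the copy mutated the original. A standalone illustration of that failure mode (plain Python, not SDK code):

```
from copy import copy

node = [None, None, "flag", True]  # old node layout: [PREV, NEXT, KEY, VALUE]
cache_data = {"flag": node}

copied_data = copy(cache_data)  # shallow copy: both dicts share the node list
copied_data["flag"][3] = False  # update VALUE through the copy

print(cache_data["flag"][3])  # False -- the original cache was polluted too
```
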
diff --git a/tests/test_scope.py b/tests/test_scope.py
index a03eb07a99..9b16dc4344 100644
--- a/tests/test_scope.py
+++ b/tests/test_scope.py
@@ -43,6 +43,28 @@ def test_all_slots_copied():
         assert getattr(scope_copy, attr) == getattr(scope, attr)
 
 
+def test_scope_flags_copy():
+    # Assert forking creates a deepcopy of the flag buffer. The new
+    # scope is free to mutate without consequence to the old scope. The
+    # old scope is free to mutate without consequence to the new scope.
+    old_scope = Scope()
+    old_scope.flags.set("a", True)
+
+    new_scope = old_scope.fork()
+    new_scope.flags.set("a", False)
+    old_scope.flags.set("b", True)
+    new_scope.flags.set("c", True)
+
+    assert old_scope.flags.get() == [
+        {"flag": "a", "result": True},
+        {"flag": "b", "result": True},
+    ]
+    assert new_scope.flags.get() == [
+        {"flag": "a", "result": False},
+        {"flag": "c", "result": True},
+    ]
+
+
 def test_merging(sentry_init, capture_events):
     sentry_init()
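This scope test is the end-to-end guarantee the LRU fix enables: forked scopes own independent flag buffers. In application code that means per-request flag tracking stays isolated under concurrency; a hypothetical handler sketch (the handler and flag name are illustrative, assuming an SDK version that ships the flag buffer from this series):

```
import sentry_sdk


def handle_request(user_flag_value):
    # Each request runs in its own forked isolation scope, so the flags
    # recorded here travel only with errors captured inside this block.
    with sentry_sdk.isolation_scope() as scope:
        scope.flags.set("beta-checkout", user_flag_value)  # illustrative flag
        ...  # handle the request; captured errors carry only these flags
```
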
== "error" +@pytest.mark.forked @pytest.mark.asyncio @pytest.mark.parametrize( "send_default_pii, include_prompts",