From 7591b5db7d5f75b6ebafce28b078e9bd13d3c8d1 Mon Sep 17 00:00:00 2001
From: Yun Kim
Date: Tue, 3 Sep 2024 18:12:49 -0400
Subject: [PATCH 01/32] WIP Gemini Integration

---
 ddtrace/_monkey.py                            |   2 +
 .../contrib/google_generativeai/__init__.py   |  94 ++++++++
 .../internal/google_generativeai/patch.py     | 217 ++++++++++++++++++
 ddtrace/llmobs/_integrations/__init__.py      |   2 +
 ddtrace/llmobs/_integrations/gemini.py        |   5 +
 5 files changed, 320 insertions(+)
 create mode 100644 ddtrace/contrib/google_generativeai/__init__.py
 create mode 100644 ddtrace/contrib/internal/google_generativeai/patch.py
 create mode 100644 ddtrace/llmobs/_integrations/gemini.py

diff --git a/ddtrace/_monkey.py b/ddtrace/_monkey.py
index f4f907a13b6..db368ad21ba 100644
--- a/ddtrace/_monkey.py
+++ b/ddtrace/_monkey.py
@@ -38,6 +38,7 @@
     "elasticsearch": True,
     "algoliasearch": True,
     "futures": True,
+    "google_generativeai": True,
     "gevent": True,
     "graphql": True,
     "grpc": True,
@@ -138,6 +139,7 @@
     "aws_lambda": ("datadog_lambda",),
     "httplib": ("http.client",),
     "kafka": ("confluent_kafka",),
+    "google_generativeai": ("google.generativeai",),
 }
 
 
diff --git a/ddtrace/contrib/google_generativeai/__init__.py b/ddtrace/contrib/google_generativeai/__init__.py
new file mode 100644
index 00000000000..5d55fa33743
--- /dev/null
+++ b/ddtrace/contrib/google_generativeai/__init__.py
@@ -0,0 +1,94 @@
+"""
+The Google Generative AI integration instruments the Google Generative AI Python SDK to trace requests made to Gemini models.
+
+All traces submitted from the Google Generative AI integration are tagged by:
+
+- ``service``, ``env``, ``version``: see the `Unified Service Tagging docs `_.
+- ``genai.request.model``: Gemini model used in the request.
+- ``genai.request.api_key``: Google API key used to make the request (obfuscated as ``...XXXX`` where ``XXXX`` is the last 4 characters of the key).
+- ``genai.request.generation_config.<option>``: Generation configuration parameters used in the request.
+
+
+(beta) Prompt and Completion Sampling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Prompt texts and completion content for the ``GenerativeModel.generate_content`` endpoint are collected in span tags with a default sampling rate of ``1.0``.
+These tags will have truncation applied if the text exceeds the configured character limit.
+
+
+Enabling
+~~~~~~~~
+
+The Google Generative AI integration is enabled automatically when you use
+:ref:`ddtrace-run` or :ref:`import ddtrace.auto`.
+
+Note that these commands also enable the ``grpc`` integration, which traces the underlying gRPC requests made by the Google Generative AI library.
+
+Alternatively, use :func:`patch() ` to manually enable the Google Generative AI integration::
+
+    from ddtrace import config, patch
+
+    patch(google_generativeai=True)
+
+
+Global Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+.. py:data:: ddtrace.config.genai["service"]
+
+   The service name reported by default for Gemini requests.
+
+   Alternatively, you can set this option with the ``DD_SERVICE`` environment
+   variable.
+
+   Default: ``DD_SERVICE``
+
+
+.. py:data:: (beta) ddtrace.config.genai["span_char_limit"]
+
+   Configure the maximum number of characters for the following data within span tags:
+
+   - Message inputs and completions
+
+   Text exceeding the maximum number of characters is truncated to the character limit
+   and has ``...`` appended to the end.
+
+   Alternatively, you can set this option with the ``DD_GOOGLE_GENAI_SPAN_CHAR_LIMIT`` environment
+   variable.
+
+   Default: ``128``
+
+
+.. py:data:: (beta) ddtrace.config.genai["span_prompt_completion_sample_rate"]
+
+   Configure the sample rate for the collection of prompts and completions as span tags.
+
+   Alternatively, you can set this option with the ``DD_GOOGLE_GENAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE`` environment
+   variable.
+
+   Default: ``1.0``
+
+
+Instance Configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+To configure the Google Generative AI integration on a per-instance basis use the
+``Pin`` API::
+
+    import google.generativeai as genai
+    from ddtrace import Pin, config
+
+    Pin.override(genai, service="my-gemini-service")
+"""  # noqa: E501
+from ...internal.utils.importlib import require_modules
+
+
+required_modules = ["google.generativeai"]
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from ..internal.google_generativeai.patch import patch
+        from ..internal.google_generativeai.patch import unpatch
+        from ..internal.google_generativeai.patch import get_version
+
+        __all__ = ["patch", "unpatch", "get_version"]
diff --git a/ddtrace/contrib/internal/google_generativeai/patch.py b/ddtrace/contrib/internal/google_generativeai/patch.py
new file mode 100644
index 00000000000..bb7296ac7bf
--- /dev/null
+++ b/ddtrace/contrib/internal/google_generativeai/patch.py
@@ -0,0 +1,217 @@
+import json
+import os
+import sys
+
+import google.generativeai as genai
+
+from ddtrace import config
+from ddtrace.contrib.trace_utils import unwrap
+from ddtrace.contrib.trace_utils import with_traced_module
+from ddtrace.contrib.trace_utils import wrap
+from ddtrace.internal.utils import get_argument_value
+from ddtrace.llmobs._integrations import GeminiIntegration
+from ddtrace.pin import Pin
+
+
+config._add(
+    "genai",
+    {
+        "span_prompt_completion_sample_rate": float(os.getenv("DD_GOOGLE_GENAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", 1.0)),
+        "span_char_limit": int(os.getenv("DD_GOOGLE_GENAI_SPAN_CHAR_LIMIT", 128)),
+    },
+)
+
+
+def get_version():
+    # type: () -> str
+    return getattr(genai, "__version__", "")
+
+
+def _tag_request(span, integration, instance, args, kwargs):
+    """Tag the generation span with request details.
+    Includes capturing generation configuration, system prompts, message contents, and function call/responses.
+ """ + contents = get_argument_value(args, kwargs, 0, "contents") + generation_config = kwargs.get("generation_config", {}) + system_instruction = getattr(instance, "_system_instruction", "") + + generation_config_dict = None + if isinstance(generation_config, dict): + generation_config_dict = generation_config + elif generation_config is not None: + generation_config_dict = generation_config.__dict__ + if generation_config_dict is not None: + for k, v in generation_config_dict.items(): + span.set_tag_str("genai.request.generation_config.%s" % k, str(v)) + + if not integration.is_pc_sampled_span(span): + return + + span.set_tag("genai.request.system_instruction", integration.trunc(system_instruction)) + if isinstance(contents, str): + span.set_tag_str("genai.request.contents.0.text", integration.trunc(contents)) + elif isinstance(contents, dict): + span.set_tag_str("genai.request.contents.0.text", integration.trunc(str(contents))) + elif isinstance(contents, list): + for content_idx, content in enumerate(contents): + if isinstance(content, str): + span.set_tag_str("genai.request.contents.%d.text" % content_idx, integration.trunc(content)) + continue + if isinstance(content, dict): + role = content.get("role", "") + if role: + span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(content.get("role", ""))) + span.set_tag_str( + "genai.request.contents.%d.parts" % content_idx, integration.trunc(str(content.get("parts", []))) + ) + continue + role = getattr(content, "role", "") + if role: + span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(role)) + parts = getattr(content, "parts", []) + for part_idx, part in enumerate(parts): + text = getattr(part, "text", "") + span.set_tag_str( + "genai.request.contents.%d.parts.%d.text" % (content_idx, part_idx), integration.trunc(str(text)) + ) + function_call = getattr(part, "function_call", None) + if function_call: + function_call_dict = type(function_call).to_dict(function_call) + span.set_tag_str( + "genai.request.contents.%d.parts.%d.function_call.name" % (content_idx, part_idx), + integration.trunc(str(function_call_dict.get("name", ""))) + ) + span.set_tag_str( + "genai.request.contents.%d.parts.%d.function_call.args" % (content_idx, part_idx), + integration.trunc(str(function_call_dict.get("args", {}))) + ) + function_response = getattr(part, "function_response", None) + if function_response: + function_response_dict = type(function_response).to_dict(function_response) + span.set_tag_str( + "genai.request.contents.%d.parts.%d.function_response.name" % (content_idx, part_idx), + str(function_response_dict.get("name", "")) + ) + span.set_tag_str( + "genai.request.contents.%d.parts.%d.function_response.response" % (content_idx, part_idx), + integration.trunc(str(function_response_dict.get("response", {}))) + ) + + +def _tag_response(span, generations, integration): + """Tag the generation span with response details. + Includes capturing generation text, roles, finish reasons, and token counts. 
+ """ + generations_dict = generations.to_dict() + for idx, candidate in enumerate(generations_dict.get("candidates", [])): + finish_reason = candidate.get("finish_reason", None) + if finish_reason: + span.set_tag_str("genai.response.candidates.%d.finish_reason" % idx, str(finish_reason)) + candidate_content = candidate.get("content", {}) + role = candidate_content.get("role", "") + span.set_tag_str("genai.response.candidates.%d.content.role" % idx, str(role)) + if integration.is_pc_sampled_span(span): + parts = candidate_content.get("parts", []) + for part_idx, part in enumerate(parts): + text = part.get("text", "") + span.set_tag_str( + "genai.response.candidates.%d.content.parts.%d.text" % (idx, part_idx), + integration.trunc(str(text)), + ) + function_call = part.get("function_call", None) + if function_call: + span.set_tag_str( + "genai.response.candidates.%d.content.parts.%d.function_call.name" % (idx, part_idx), + integration.trunc(function_call.get("name", "")) + ) + span.set_tag_str( + "genai.response.candidates.%d.content.parts.%d.function_call.args" % (idx, part_idx), + integration.trunc(str(function_call.get("args", {}))) + ) + token_counts = generations_dict.get("usage_metadata", None) + if token_counts: + span.set_metric("genai.response.usage.prompt_tokens", token_counts.get("prompt_token_count", 0)) + span.set_metric("genai.response.usage.completion_tokens", token_counts.get("candidates_token_count", 0)) + span.set_metric("genai.response.usage.total_tokens", token_counts.get("total_token_count", 0)) + + +@with_traced_module +def traced_generate(genai, pin, func, instance, args, kwargs): + integration = genai._datadog_integration + stream = kwargs.get("stream", False) + generations = None + span = integration.trace( + pin, "%s.%s" % (instance.__class__.__name__, func.__name__), submit_to_llmobs=True, provider="google", + ) + try: + _tag_request(span, integration, instance, args, kwargs) + generations = func(*args, **kwargs) + if stream: + return # TODO: handle streams + else: + _tag_response(span, generations, integration) + + except Exception: + span.set_exc_info(*sys.exc_info()) + raise + finally: + if integration.is_pc_sampled_llmobs(span): + integration.set_llmobs_tags(span, generations) + # streamed spans will be finished separately once the stream generator is exhausted + if span.error or not stream: + span.finish() + return generations + + +@with_traced_module +async def traced_agenerate(genai, pin, func, instance, args, kwargs): + integration = genai._datadog_integration + stream = kwargs.get("stream", False) + generations = None + span = integration.trace( + pin, "%s.%s" % (instance.__class__.__name__, func.__name__), submit_to_llmobs=True, provider="google", + ) + try: + _tag_request(span, integration, instance, args, kwargs) + generations = await func(*args, **kwargs) + if stream: + return # TODO: handle streams + else: + _tag_response(span, generations, integration) + + except Exception: + span.set_exc_info(*sys.exc_info()) + raise + finally: + if integration.is_pc_sampled_llmobs(span): + integration.set_llmobs_tags(span, generations) + # streamed spans will be finished separately once the stream generator is exhausted + if span.error or not stream: + span.finish() + return generations + + +def patch(): + if getattr(genai, "_datadog_patch", False): + return + + genai._datadog_patch = True + + Pin().onto(genai) + integration = GeminiIntegration(integration_config=config.genai) + genai._datadog_integration = integration + + wrap("google.generativeai", 
"GenerativeModel.generate_content", traced_generate(genai)) + wrap("google.generativeai", "GenerativeModel.generate_content_async", traced_agenerate(genai)) + + +def unpatch(): + if not getattr(genai, "_datadog_patch", False): + return + + genai._datadog_patch = False + + unwrap(genai.GenerativeModel, "generate_content") + unwrap(genai.GenerativeModel, "generate_content_async") + + delattr(genai, "_datadog_integration") diff --git a/ddtrace/llmobs/_integrations/__init__.py b/ddtrace/llmobs/_integrations/__init__.py index 465cab1bb3d..5303b0530aa 100644 --- a/ddtrace/llmobs/_integrations/__init__.py +++ b/ddtrace/llmobs/_integrations/__init__.py @@ -1,6 +1,7 @@ from .anthropic import AnthropicIntegration from .base import BaseLLMIntegration from .bedrock import BedrockIntegration +from .gemini import GeminiIntegration from .langchain import LangChainIntegration from .openai import OpenAIIntegration @@ -9,6 +10,7 @@ "AnthropicIntegration", "BaseLLMIntegration", "BedrockIntegration", + "GeminiIntegration", "LangChainIntegration", "OpenAIIntegration", ] diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py new file mode 100644 index 00000000000..258c9e16fdb --- /dev/null +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -0,0 +1,5 @@ +from .base import BaseLLMIntegration + + +class GeminiIntegration(BaseLLMIntegration): + _integration_name = "gemini" From f4c62a533522629ff259f7960e124639d8e43307 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Wed, 4 Sep 2024 17:27:59 -0400 Subject: [PATCH 02/32] Support tracing streamed responses --- .../internal/google_generativeai/patch.py | 169 +++++++++++------- 1 file changed, 103 insertions(+), 66 deletions(-) diff --git a/ddtrace/contrib/internal/google_generativeai/patch.py b/ddtrace/contrib/internal/google_generativeai/patch.py index bb7296ac7bf..d78f9ddf82a 100644 --- a/ddtrace/contrib/internal/google_generativeai/patch.py +++ b/ddtrace/contrib/internal/google_generativeai/patch.py @@ -3,6 +3,7 @@ import sys import google.generativeai as genai +import wrapt from ddtrace import config from ddtrace.contrib.trace_utils import unwrap @@ -27,6 +28,44 @@ def get_version(): return getattr(genai, "__version__", "") +class BaseTracedGenerateContentResponse(wrapt.ObjectProxy): + """Base wrapper class for GenerateContentResponse objects for tracing streamed responses.""" + def __init__(self, wrapped, integration, span, args, kwargs): + super().__init__(wrapped) + self._dd_integration = integration + self._dd_span = span + self._args = args + self._kwargs = kwargs + + +class TracedGenerateContentResponse(BaseTracedGenerateContentResponse): + def __iter__(self): + try: + for chunk in self.__wrapped__.__iter__(): + yield chunk + except Exception: + self._dd_span.set_exc_info(*sys.exc_info()) + raise + else: + _tag_response(self._dd_span, self.__wrapped__, self._dd_integration) + finally: + self._dd_span.finish() + + +class TracedAsyncGenerateContentResponse(BaseTracedGenerateContentResponse): + async def __aiter__(self): + try: + async for chunk in self.__wrapped__.__aiter__(): + yield chunk + except Exception: + self._dd_span.set_exc_info(*sys.exc_info()) + raise + else: + _tag_response(self._dd_span, self.__wrapped__, self._dd_integration) + finally: + self._dd_span.finish() + + def _tag_request(span, integration, instance, args, kwargs): """Tag the generation span with request details. Includes capturing generation configuration, system prompts, message contents, and function call/responses. 
@@ -50,52 +89,55 @@ def _tag_request(span, integration, instance, args, kwargs): span.set_tag("genai.request.system_instruction", integration.trunc(system_instruction)) if isinstance(contents, str): span.set_tag_str("genai.request.contents.0.text", integration.trunc(contents)) + return elif isinstance(contents, dict): span.set_tag_str("genai.request.contents.0.text", integration.trunc(str(contents))) - elif isinstance(contents, list): - for content_idx, content in enumerate(contents): - if isinstance(content, str): - span.set_tag_str("genai.request.contents.%d.text" % content_idx, integration.trunc(content)) - continue - if isinstance(content, dict): - role = content.get("role", "") - if role: - span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(content.get("role", ""))) + return + elif not isinstance(contents, list): + return + for content_idx, content in enumerate(contents): + if isinstance(content, str): + span.set_tag_str("genai.request.contents.%d.text" % content_idx, integration.trunc(content)) + continue + if isinstance(content, dict): + role = content.get("role", "") + if role: + span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(content.get("role", ""))) + span.set_tag_str( + "genai.request.contents.%d.parts" % content_idx, integration.trunc(str(content.get("parts", []))) + ) + continue + role = getattr(content, "role", "") + if role: + span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(role)) + parts = getattr(content, "parts", []) + for part_idx, part in enumerate(parts): + text = getattr(part, "text", "") + span.set_tag_str( + "genai.request.contents.%d.parts.%d.text" % (content_idx, part_idx), integration.trunc(str(text)) + ) + function_call = getattr(part, "function_call", None) + if function_call: + function_call_dict = type(function_call).to_dict(function_call) span.set_tag_str( - "genai.request.contents.%d.parts" % content_idx, integration.trunc(str(content.get("parts", []))) + "genai.request.contents.%d.parts.%d.function_call.name" % (content_idx, part_idx), + integration.trunc(str(function_call_dict.get("name", ""))) ) - continue - role = getattr(content, "role", "") - if role: - span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(role)) - parts = getattr(content, "parts", []) - for part_idx, part in enumerate(parts): - text = getattr(part, "text", "") span.set_tag_str( - "genai.request.contents.%d.parts.%d.text" % (content_idx, part_idx), integration.trunc(str(text)) + "genai.request.contents.%d.parts.%d.function_call.args" % (content_idx, part_idx), + integration.trunc(str(function_call_dict.get("args", {}))) + ) + function_response = getattr(part, "function_response", None) + if function_response: + function_response_dict = type(function_response).to_dict(function_response) + span.set_tag_str( + "genai.request.contents.%d.parts.%d.function_response.name" % (content_idx, part_idx), + str(function_response_dict.get("name", "")) + ) + span.set_tag_str( + "genai.request.contents.%d.parts.%d.function_response.response" % (content_idx, part_idx), + integration.trunc(str(function_response_dict.get("response", {}))) ) - function_call = getattr(part, "function_call", None) - if function_call: - function_call_dict = type(function_call).to_dict(function_call) - span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_call.name" % (content_idx, part_idx), - integration.trunc(str(function_call_dict.get("name", ""))) - ) - span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_call.args" % 
(content_idx, part_idx), - integration.trunc(str(function_call_dict.get("args", {}))) - ) - function_response = getattr(part, "function_response", None) - if function_response: - function_response_dict = type(function_response).to_dict(function_response) - span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_response.name" % (content_idx, part_idx), - str(function_response_dict.get("name", "")) - ) - span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_response.response" % (content_idx, part_idx), - integration.trunc(str(function_response_dict.get("response", {}))) - ) def _tag_response(span, generations, integration): @@ -110,24 +152,25 @@ def _tag_response(span, generations, integration): candidate_content = candidate.get("content", {}) role = candidate_content.get("role", "") span.set_tag_str("genai.response.candidates.%d.content.role" % idx, str(role)) - if integration.is_pc_sampled_span(span): - parts = candidate_content.get("parts", []) - for part_idx, part in enumerate(parts): - text = part.get("text", "") + if not integration.is_pc_sampled_span(span): + continue + parts = candidate_content.get("parts", []) + for part_idx, part in enumerate(parts): + text = part.get("text", "") + span.set_tag_str( + "genai.response.candidates.%d.content.parts.%d.text" % (idx, part_idx), + integration.trunc(str(text)), + ) + function_call = part.get("function_call", None) + if function_call: span.set_tag_str( - "genai.response.candidates.%d.content.parts.%d.text" % (idx, part_idx), - integration.trunc(str(text)), + "genai.response.candidates.%d.content.parts.%d.function_call.name" % (idx, part_idx), + integration.trunc(function_call.get("name", "")) + ) + span.set_tag_str( + "genai.response.candidates.%d.content.parts.%d.function_call.args" % (idx, part_idx), + integration.trunc(str(function_call.get("args", {}))) ) - function_call = part.get("function_call", None) - if function_call: - span.set_tag_str( - "genai.response.candidates.%d.content.parts.%d.function_call.name" % (idx, part_idx), - integration.trunc(function_call.get("name", "")) - ) - span.set_tag_str( - "genai.response.candidates.%d.content.parts.%d.function_call.args" % (idx, part_idx), - integration.trunc(str(function_call.get("args", {}))) - ) token_counts = generations_dict.get("usage_metadata", None) if token_counts: span.set_metric("genai.response.usage.prompt_tokens", token_counts.get("prompt_token_count", 0)) @@ -141,22 +184,19 @@ def traced_generate(genai, pin, func, instance, args, kwargs): stream = kwargs.get("stream", False) generations = None span = integration.trace( - pin, "%s.%s" % (instance.__class__.__name__, func.__name__), submit_to_llmobs=True, provider="google", + pin, "%s.%s" % (instance.__class__.__name__, func.__name__), provider="google", ) try: _tag_request(span, integration, instance, args, kwargs) generations = func(*args, **kwargs) if stream: - return # TODO: handle streams + return TracedGenerateContentResponse(generations, integration, span, args, kwargs) else: _tag_response(span, generations, integration) - except Exception: span.set_exc_info(*sys.exc_info()) raise finally: - if integration.is_pc_sampled_llmobs(span): - integration.set_llmobs_tags(span, generations) # streamed spans will be finished separately once the stream generator is exhausted if span.error or not stream: span.finish() @@ -169,22 +209,19 @@ async def traced_agenerate(genai, pin, func, instance, args, kwargs): stream = kwargs.get("stream", False) generations = None span = integration.trace( - pin, "%s.%s" % 
(instance.__class__.__name__, func.__name__), submit_to_llmobs=True, provider="google", + pin, "%s.%s" % (instance.__class__.__name__, func.__name__), provider="google", ) try: _tag_request(span, integration, instance, args, kwargs) generations = await func(*args, **kwargs) if stream: - return # TODO: handle streams + return TracedAsyncGenerateContentResponse(generations, integration, span, args, kwargs) else: _tag_response(span, generations, integration) - except Exception: span.set_exc_info(*sys.exc_info()) raise finally: - if integration.is_pc_sampled_llmobs(span): - integration.set_llmobs_tags(span, generations) # streamed spans will be finished separately once the stream generator is exhausted if span.error or not stream: span.finish() From 7aed48b22044565df69005a893f78c8b35d2d3e4 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Wed, 4 Sep 2024 17:59:47 -0400 Subject: [PATCH 03/32] fmt, refactor nesting --- .../internal/google_generativeai/_utils.py | 173 +++++++++++++++++ .../internal/google_generativeai/patch.py | 179 ++---------------- 2 files changed, 190 insertions(+), 162 deletions(-) create mode 100644 ddtrace/contrib/internal/google_generativeai/_utils.py diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py new file mode 100644 index 00000000000..b15c95857c0 --- /dev/null +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -0,0 +1,173 @@ +import sys + +import wrapt + +from ddtrace.internal.utils import get_argument_value + + +class BaseTracedGenerateContentResponse(wrapt.ObjectProxy): + """Base wrapper class for GenerateContentResponse objects for tracing streamed responses.""" + + def __init__(self, wrapped, integration, span, args, kwargs): + super().__init__(wrapped) + self._dd_integration = integration + self._dd_span = span + self._args = args + self._kwargs = kwargs + + +class TracedGenerateContentResponse(BaseTracedGenerateContentResponse): + def __iter__(self): + try: + for chunk in self.__wrapped__.__iter__(): + yield chunk + except Exception: + self._dd_span.set_exc_info(*sys.exc_info()) + raise + else: + tag_response(self._dd_span, self.__wrapped__, self._dd_integration) + finally: + self._dd_span.finish() + + +class TracedAsyncGenerateContentResponse(BaseTracedGenerateContentResponse): + async def __aiter__(self): + try: + async for chunk in self.__wrapped__.__aiter__(): + yield chunk + except Exception: + self._dd_span.set_exc_info(*sys.exc_info()) + raise + else: + tag_response(self._dd_span, self.__wrapped__, self._dd_integration) + finally: + self._dd_span.finish() + + +def _tag_request_content_part(span, integration, part, part_idx, content_idx): + """Tag the generation span with request content parts.""" + text = getattr(part, "text", "") + span.set_tag_str("genai.request.contents.%d.parts.%d.text" % (content_idx, part_idx), integration.trunc(str(text))) + function_call = getattr(part, "function_call", None) + if function_call: + function_call_dict = type(function_call).to_dict(function_call) + span.set_tag_str( + "genai.request.contents.%d.parts.%d.function_call.name" % (content_idx, part_idx), + integration.trunc(str(function_call_dict.get("name", ""))), + ) + span.set_tag_str( + "genai.request.contents.%d.parts.%d.function_call.args" % (content_idx, part_idx), + integration.trunc(str(function_call_dict.get("args", {}))), + ) + function_response = getattr(part, "function_response", None) + if function_response: + function_response_dict = 
type(function_response).to_dict(function_response) + span.set_tag_str( + "genai.request.contents.%d.parts.%d.function_response.name" % (content_idx, part_idx), + str(function_response_dict.get("name", "")), + ) + span.set_tag_str( + "genai.request.contents.%d.parts.%d.function_response.response" % (content_idx, part_idx), + integration.trunc(str(function_response_dict.get("response", {}))), + ) + + +def _tag_request_content(span, integration, content, content_idx): + """Tag the generation span with request contents.""" + if isinstance(content, str): + span.set_tag_str("genai.request.contents.%d.text" % content_idx, integration.trunc(content)) + return + if isinstance(content, dict): + role = content.get("role", "") + if role: + span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(content.get("role", ""))) + span.set_tag_str( + "genai.request.contents.%d.parts" % content_idx, integration.trunc(str(content.get("parts", []))) + ) + return + role = getattr(content, "role", "") + if role: + span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(role)) + parts = getattr(content, "parts", []) + for part_idx, part in enumerate(parts): + _tag_request_content_part(span, integration, part, part_idx, content_idx) + + +def _tag_response_part(span, integration, part, part_idx, candidate_idx): + """Tag the generation span with response part text and function calls.""" + text = part.get("text", "") + span.set_tag_str( + "genai.response.candidates.%d.content.parts.%d.text" % (candidate_idx, part_idx), + integration.trunc(str(text)), + ) + function_call = part.get("function_call", None) + if not function_call: + return + span.set_tag_str( + "genai.response.candidates.%d.content.parts.%d.function_call.name" % (candidate_idx, part_idx), + integration.trunc(str(function_call.get("name", ""))), + ) + span.set_tag_str( + "genai.response.candidates.%d.content.parts.%d.function_call.args" % (candidate_idx, part_idx), + integration.trunc(str(function_call.get("args", {}))), + ) + + +def tag_request(span, integration, instance, args, kwargs): + """Tag the generation span with request details. + Includes capturing generation configuration, system prompt, and messages. + """ + contents = get_argument_value(args, kwargs, 0, "contents") + generation_config = kwargs.get("generation_config", {}) + system_instruction = getattr(instance, "_system_instruction", "") + + generation_config_dict = None + if isinstance(generation_config, dict): + generation_config_dict = generation_config + elif generation_config is not None: + generation_config_dict = generation_config.__dict__ + if generation_config_dict is not None: + for k, v in generation_config_dict.items(): + span.set_tag_str("genai.request.generation_config.%s" % k, str(v)) + + if not integration.is_pc_sampled_span(span): + return + + span.set_tag("genai.request.system_instruction", integration.trunc(system_instruction)) + + if isinstance(contents, str): + span.set_tag_str("genai.request.contents.0.text", integration.trunc(contents)) + return + elif isinstance(contents, dict): + span.set_tag_str("genai.request.contents.0.text", integration.trunc(str(contents))) + return + elif not isinstance(contents, list): + return + for content_idx, content in enumerate(contents): + _tag_request_content(span, integration, content, content_idx) + + +def tag_response(span, generations, integration): + """Tag the generation span with response details. + Includes capturing generation text, roles, finish reasons, and token counts. 
+ """ + generations_dict = generations.to_dict() + for candidate_idx, candidate in enumerate(generations_dict.get("candidates", [])): + finish_reason = candidate.get("finish_reason", None) + if finish_reason: + span.set_tag_str("genai.response.candidates.%d.finish_reason" % candidate_idx, str(finish_reason)) + candidate_content = candidate.get("content", {}) + role = candidate_content.get("role", "") + span.set_tag_str("genai.response.candidates.%d.content.role" % candidate_idx, str(role)) + if not integration.is_pc_sampled_span(span): + continue + parts = candidate_content.get("parts", []) + for part_idx, part in enumerate(parts): + _tag_response_part(span, integration, parts, part_idx, candidate_idx) + + token_counts = generations_dict.get("usage_metadata", None) + if not token_counts: + return + span.set_metric("genai.response.usage.prompt_tokens", token_counts.get("prompt_token_count", 0)) + span.set_metric("genai.response.usage.completion_tokens", token_counts.get("candidates_token_count", 0)) + span.set_metric("genai.response.usage.total_tokens", token_counts.get("total_token_count", 0)) diff --git a/ddtrace/contrib/internal/google_generativeai/patch.py b/ddtrace/contrib/internal/google_generativeai/patch.py index d78f9ddf82a..8f464f81502 100644 --- a/ddtrace/contrib/internal/google_generativeai/patch.py +++ b/ddtrace/contrib/internal/google_generativeai/patch.py @@ -1,15 +1,16 @@ -import json import os import sys import google.generativeai as genai -import wrapt from ddtrace import config +from ddtrace.contrib.internal.google_generativeai._utils import tag_request +from ddtrace.contrib.internal.google_generativeai._utils import tag_response +from ddtrace.contrib.internal.google_generativeai._utils import TracedAsyncGenerateContentResponse +from ddtrace.contrib.internal.google_generativeai._utils import TracedGenerateContentResponse from ddtrace.contrib.trace_utils import unwrap from ddtrace.contrib.trace_utils import with_traced_module from ddtrace.contrib.trace_utils import wrap -from ddtrace.internal.utils import get_argument_value from ddtrace.llmobs._integrations import GeminiIntegration from ddtrace.pin import Pin @@ -17,7 +18,9 @@ config._add( "genai", { - "span_prompt_completion_sample_rate": float(os.getenv("DD_GOOGLE_GENAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", 1.0)), + "span_prompt_completion_sample_rate": float( + os.getenv("DD_GOOGLE_GENAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", 1.0) + ), "span_char_limit": int(os.getenv("DD_GOOGLE_GENAI_SPAN_CHAR_LIMIT", 128)), }, ) @@ -28,171 +31,22 @@ def get_version(): return getattr(genai, "__version__", "") -class BaseTracedGenerateContentResponse(wrapt.ObjectProxy): - """Base wrapper class for GenerateContentResponse objects for tracing streamed responses.""" - def __init__(self, wrapped, integration, span, args, kwargs): - super().__init__(wrapped) - self._dd_integration = integration - self._dd_span = span - self._args = args - self._kwargs = kwargs - - -class TracedGenerateContentResponse(BaseTracedGenerateContentResponse): - def __iter__(self): - try: - for chunk in self.__wrapped__.__iter__(): - yield chunk - except Exception: - self._dd_span.set_exc_info(*sys.exc_info()) - raise - else: - _tag_response(self._dd_span, self.__wrapped__, self._dd_integration) - finally: - self._dd_span.finish() - - -class TracedAsyncGenerateContentResponse(BaseTracedGenerateContentResponse): - async def __aiter__(self): - try: - async for chunk in self.__wrapped__.__aiter__(): - yield chunk - except Exception: - 
self._dd_span.set_exc_info(*sys.exc_info()) - raise - else: - _tag_response(self._dd_span, self.__wrapped__, self._dd_integration) - finally: - self._dd_span.finish() - - -def _tag_request(span, integration, instance, args, kwargs): - """Tag the generation span with request details. - Includes capturing generation configuration, system prompts, message contents, and function call/responses. - """ - contents = get_argument_value(args, kwargs, 0, "contents") - generation_config = kwargs.get("generation_config", {}) - system_instruction = getattr(instance, "_system_instruction", "") - - generation_config_dict = None - if isinstance(generation_config, dict): - generation_config_dict = generation_config - elif generation_config is not None: - generation_config_dict = generation_config.__dict__ - if generation_config_dict is not None: - for k, v in generation_config_dict.items(): - span.set_tag_str("genai.request.generation_config.%s" % k, str(v)) - - if not integration.is_pc_sampled_span(span): - return - - span.set_tag("genai.request.system_instruction", integration.trunc(system_instruction)) - if isinstance(contents, str): - span.set_tag_str("genai.request.contents.0.text", integration.trunc(contents)) - return - elif isinstance(contents, dict): - span.set_tag_str("genai.request.contents.0.text", integration.trunc(str(contents))) - return - elif not isinstance(contents, list): - return - for content_idx, content in enumerate(contents): - if isinstance(content, str): - span.set_tag_str("genai.request.contents.%d.text" % content_idx, integration.trunc(content)) - continue - if isinstance(content, dict): - role = content.get("role", "") - if role: - span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(content.get("role", ""))) - span.set_tag_str( - "genai.request.contents.%d.parts" % content_idx, integration.trunc(str(content.get("parts", []))) - ) - continue - role = getattr(content, "role", "") - if role: - span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(role)) - parts = getattr(content, "parts", []) - for part_idx, part in enumerate(parts): - text = getattr(part, "text", "") - span.set_tag_str( - "genai.request.contents.%d.parts.%d.text" % (content_idx, part_idx), integration.trunc(str(text)) - ) - function_call = getattr(part, "function_call", None) - if function_call: - function_call_dict = type(function_call).to_dict(function_call) - span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_call.name" % (content_idx, part_idx), - integration.trunc(str(function_call_dict.get("name", ""))) - ) - span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_call.args" % (content_idx, part_idx), - integration.trunc(str(function_call_dict.get("args", {}))) - ) - function_response = getattr(part, "function_response", None) - if function_response: - function_response_dict = type(function_response).to_dict(function_response) - span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_response.name" % (content_idx, part_idx), - str(function_response_dict.get("name", "")) - ) - span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_response.response" % (content_idx, part_idx), - integration.trunc(str(function_response_dict.get("response", {}))) - ) - - -def _tag_response(span, generations, integration): - """Tag the generation span with response details. - Includes capturing generation text, roles, finish reasons, and token counts. 
- """ - generations_dict = generations.to_dict() - for idx, candidate in enumerate(generations_dict.get("candidates", [])): - finish_reason = candidate.get("finish_reason", None) - if finish_reason: - span.set_tag_str("genai.response.candidates.%d.finish_reason" % idx, str(finish_reason)) - candidate_content = candidate.get("content", {}) - role = candidate_content.get("role", "") - span.set_tag_str("genai.response.candidates.%d.content.role" % idx, str(role)) - if not integration.is_pc_sampled_span(span): - continue - parts = candidate_content.get("parts", []) - for part_idx, part in enumerate(parts): - text = part.get("text", "") - span.set_tag_str( - "genai.response.candidates.%d.content.parts.%d.text" % (idx, part_idx), - integration.trunc(str(text)), - ) - function_call = part.get("function_call", None) - if function_call: - span.set_tag_str( - "genai.response.candidates.%d.content.parts.%d.function_call.name" % (idx, part_idx), - integration.trunc(function_call.get("name", "")) - ) - span.set_tag_str( - "genai.response.candidates.%d.content.parts.%d.function_call.args" % (idx, part_idx), - integration.trunc(str(function_call.get("args", {}))) - ) - token_counts = generations_dict.get("usage_metadata", None) - if token_counts: - span.set_metric("genai.response.usage.prompt_tokens", token_counts.get("prompt_token_count", 0)) - span.set_metric("genai.response.usage.completion_tokens", token_counts.get("candidates_token_count", 0)) - span.set_metric("genai.response.usage.total_tokens", token_counts.get("total_token_count", 0)) - - @with_traced_module def traced_generate(genai, pin, func, instance, args, kwargs): integration = genai._datadog_integration stream = kwargs.get("stream", False) generations = None span = integration.trace( - pin, "%s.%s" % (instance.__class__.__name__, func.__name__), provider="google", + pin, + "%s.%s" % (instance.__class__.__name__, func.__name__), + provider="google", ) try: - _tag_request(span, integration, instance, args, kwargs) + tag_request(span, integration, instance, args, kwargs) generations = func(*args, **kwargs) if stream: return TracedGenerateContentResponse(generations, integration, span, args, kwargs) - else: - _tag_response(span, generations, integration) + tag_response(span, generations, integration) except Exception: span.set_exc_info(*sys.exc_info()) raise @@ -209,15 +63,16 @@ async def traced_agenerate(genai, pin, func, instance, args, kwargs): stream = kwargs.get("stream", False) generations = None span = integration.trace( - pin, "%s.%s" % (instance.__class__.__name__, func.__name__), provider="google", + pin, + "%s.%s" % (instance.__class__.__name__, func.__name__), + provider="google", ) try: - _tag_request(span, integration, instance, args, kwargs) + tag_request(span, integration, instance, args, kwargs) generations = await func(*args, **kwargs) if stream: return TracedAsyncGenerateContentResponse(generations, integration, span, args, kwargs) - else: - _tag_response(span, generations, integration) + tag_response(span, generations, integration) except Exception: span.set_exc_info(*sys.exc_info()) raise From c8e8c57912c23331150a1711d20b91491b61957c Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Wed, 4 Sep 2024 18:27:31 -0400 Subject: [PATCH 04/32] fmt, release note draft --- ddtrace/contrib/internal/google_generativeai/patch.py | 4 ++-- releasenotes/notes/feat-google-gemini-d5ee30b1d711bc08.yaml | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/feat-google-gemini-d5ee30b1d711bc08.yaml diff 
--git a/ddtrace/contrib/internal/google_generativeai/patch.py b/ddtrace/contrib/internal/google_generativeai/patch.py index 8f464f81502..286a3ec5251 100644 --- a/ddtrace/contrib/internal/google_generativeai/patch.py +++ b/ddtrace/contrib/internal/google_generativeai/patch.py @@ -4,10 +4,10 @@ import google.generativeai as genai from ddtrace import config -from ddtrace.contrib.internal.google_generativeai._utils import tag_request -from ddtrace.contrib.internal.google_generativeai._utils import tag_response from ddtrace.contrib.internal.google_generativeai._utils import TracedAsyncGenerateContentResponse from ddtrace.contrib.internal.google_generativeai._utils import TracedGenerateContentResponse +from ddtrace.contrib.internal.google_generativeai._utils import tag_request +from ddtrace.contrib.internal.google_generativeai._utils import tag_response from ddtrace.contrib.trace_utils import unwrap from ddtrace.contrib.trace_utils import with_traced_module from ddtrace.contrib.trace_utils import wrap diff --git a/releasenotes/notes/feat-google-gemini-d5ee30b1d711bc08.yaml b/releasenotes/notes/feat-google-gemini-d5ee30b1d711bc08.yaml new file mode 100644 index 00000000000..7a0315be804 --- /dev/null +++ b/releasenotes/notes/feat-google-gemini-d5ee30b1d711bc08.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + google_generativeai: This introduces tracing support for Google Gemini API ``generate_content`` calls. + See `the docs `_ + for more information. From a95f2a6258168346bda1ff23000d549ff6fc2ace Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Thu, 5 Sep 2024 17:24:24 -0400 Subject: [PATCH 05/32] Extract api key, model name --- .../internal/google_generativeai/_utils.py | 40 ++++++++++++++++--- .../internal/google_generativeai/patch.py | 12 ++++-- 2 files changed, 43 insertions(+), 9 deletions(-) diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py index b15c95857c0..d23467b7804 100644 --- a/ddtrace/contrib/internal/google_generativeai/_utils.py +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -8,8 +8,9 @@ class BaseTracedGenerateContentResponse(wrapt.ObjectProxy): """Base wrapper class for GenerateContentResponse objects for tracing streamed responses.""" - def __init__(self, wrapped, integration, span, args, kwargs): + def __init__(self, wrapped, instance, integration, span, args, kwargs): super().__init__(wrapped) + self._model_instance = instance self._dd_integration = integration self._dd_span = span self._args = args @@ -25,7 +26,7 @@ def __iter__(self): self._dd_span.set_exc_info(*sys.exc_info()) raise else: - tag_response(self._dd_span, self.__wrapped__, self._dd_integration) + tag_response(self._dd_span, self.__wrapped__, self._dd_integration, self._model_instance) finally: self._dd_span.finish() @@ -39,11 +40,34 @@ async def __aiter__(self): self._dd_span.set_exc_info(*sys.exc_info()) raise else: - tag_response(self._dd_span, self.__wrapped__, self._dd_integration) + tag_response(self._dd_span, self.__wrapped__, self._dd_integration, self._model_instance) finally: self._dd_span.finish() +def _extract_model_name(instance): + """Extract the model name from the instance.""" + model_name = getattr(instance, "model_name", "") + if not model_name or not isinstance(model_name, str): + return "" + if "/" in model_name: + return model_name.split("/")[-1] + return model_name + + +def _extract_api_key(instance): + """Extract the API key from the model instance.""" + client = getattr(instance, "_client", None) + if 
getattr(instance, "_async_client", None): + client = getattr(instance._async_client, "_client", None) + if not client: + return None + client_options = getattr(client, "_client_options", None) + if not client_options: + return None + return getattr(client_options, "api_key", None) + + def _tag_request_content_part(span, integration, part, part_idx, content_idx): """Tag the generation span with request content parts.""" text = getattr(part, "text", "") @@ -130,6 +154,8 @@ def tag_request(span, integration, instance, args, kwargs): for k, v in generation_config_dict.items(): span.set_tag_str("genai.request.generation_config.%s" % k, str(v)) + span.set_tag_str("genai.request.model", str(_extract_model_name(instance))) + if not integration.is_pc_sampled_span(span): return @@ -147,10 +173,14 @@ def tag_request(span, integration, instance, args, kwargs): _tag_request_content(span, integration, content, content_idx) -def tag_response(span, generations, integration): +def tag_response(span, generations, integration, instance): """Tag the generation span with response details. Includes capturing generation text, roles, finish reasons, and token counts. """ + api_key = _extract_api_key(instance) + if api_key: + span.set_tag("genai.request.api_key", "...{}".format(api_key[-4:])) + generations_dict = generations.to_dict() for candidate_idx, candidate in enumerate(generations_dict.get("candidates", [])): finish_reason = candidate.get("finish_reason", None) @@ -163,7 +193,7 @@ def tag_response(span, generations, integration): continue parts = candidate_content.get("parts", []) for part_idx, part in enumerate(parts): - _tag_response_part(span, integration, parts, part_idx, candidate_idx) + _tag_response_part(span, integration, part, part_idx, candidate_idx) token_counts = generations_dict.get("usage_metadata", None) if not token_counts: diff --git a/ddtrace/contrib/internal/google_generativeai/patch.py b/ddtrace/contrib/internal/google_generativeai/patch.py index 286a3ec5251..25b9fa945fb 100644 --- a/ddtrace/contrib/internal/google_generativeai/patch.py +++ b/ddtrace/contrib/internal/google_generativeai/patch.py @@ -6,6 +6,7 @@ from ddtrace import config from ddtrace.contrib.internal.google_generativeai._utils import TracedAsyncGenerateContentResponse from ddtrace.contrib.internal.google_generativeai._utils import TracedGenerateContentResponse +from ddtrace.contrib.internal.google_generativeai._utils import _extract_api_key from ddtrace.contrib.internal.google_generativeai._utils import tag_request from ddtrace.contrib.internal.google_generativeai._utils import tag_response from ddtrace.contrib.trace_utils import unwrap @@ -44,9 +45,12 @@ def traced_generate(genai, pin, func, instance, args, kwargs): try: tag_request(span, integration, instance, args, kwargs) generations = func(*args, **kwargs) + api_key = _extract_api_key(instance) + if api_key: + span.set_tag("genai.request.api_key", "...{}".format(api_key[-4:])) if stream: - return TracedGenerateContentResponse(generations, integration, span, args, kwargs) - tag_response(span, generations, integration) + return TracedGenerateContentResponse(generations, instance, integration, span, args, kwargs) + tag_response(span, generations, integration, instance) except Exception: span.set_exc_info(*sys.exc_info()) raise @@ -71,8 +75,8 @@ async def traced_agenerate(genai, pin, func, instance, args, kwargs): tag_request(span, integration, instance, args, kwargs) generations = await func(*args, **kwargs) if stream: - return 
TracedAsyncGenerateContentResponse(generations, integration, span, args, kwargs) - tag_response(span, generations, integration) + return TracedAsyncGenerateContentResponse(generations, instance, integration, span, args, kwargs) + tag_response(span, generations, integration, instance) except Exception: span.set_exc_info(*sys.exc_info()) raise From 364bc73b1f7ab408a419605831b8bf34d7d4567b Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Mon, 9 Sep 2024 16:46:33 -0400 Subject: [PATCH 06/32] Add tests --- .gitlab/tests/contrib.yml | 5 + .riot/requirements/1025297.txt | 50 +++ .riot/requirements/1736f46.txt | 48 +++ .riot/requirements/1b72277.txt | 50 +++ .riot/requirements/9b83479.txt | 48 +++ .../internal/google_generativeai/_utils.py | 30 +- riotfile.py | 10 + tests/contrib/google_generativeai/__init__.py | 0 tests/contrib/google_generativeai/conftest.py | 70 ++++ .../google_generativeai/test_data/apple.jpg | Bin 0 -> 42568 bytes .../test_google_generativeai.py | 337 ++++++++++++++++++ .../test_google_generativeai_patch.py | 24 ++ tests/contrib/google_generativeai/utils.py | 148 ++++++++ ...e_generativeai.test_gemini_completion.json | 43 +++ ...rativeai.test_gemini_completion_error.json | 39 ++ ...rativeai.test_gemini_completion_image.json | 44 +++ ...t_gemini_completion_multiple_messages.json | 48 +++ ...ativeai.test_gemini_completion_stream.json | 44 +++ ....test_gemini_completion_system_prompt.json | 44 +++ ...ai.test_gemini_completion_tool_stream.json | 46 +++ ...veai.test_gemini_tool_chat_completion.json | 82 +++++ ...erativeai.test_gemini_tool_completion.json | 45 +++ 22 files changed, 1248 insertions(+), 7 deletions(-) create mode 100644 .riot/requirements/1025297.txt create mode 100644 .riot/requirements/1736f46.txt create mode 100644 .riot/requirements/1b72277.txt create mode 100644 .riot/requirements/9b83479.txt create mode 100644 tests/contrib/google_generativeai/__init__.py create mode 100644 tests/contrib/google_generativeai/conftest.py create mode 100644 tests/contrib/google_generativeai/test_data/apple.jpg create mode 100644 tests/contrib/google_generativeai/test_google_generativeai.py create mode 100644 tests/contrib/google_generativeai/test_google_generativeai_patch.py create mode 100644 tests/contrib/google_generativeai/utils.py create mode 100644 tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json create mode 100644 tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json create mode 100644 tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json create mode 100644 tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json create mode 100644 tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json create mode 100644 tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json create mode 100644 tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json create mode 100644 tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json create mode 100644 tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json diff --git a/.gitlab/tests/contrib.yml b/.gitlab/tests/contrib.yml index 34c314a901f..570d011f5bf 
100644 --- a/.gitlab/tests/contrib.yml +++ b/.gitlab/tests/contrib.yml @@ -38,6 +38,11 @@ gevent: variables: SUITE_NAME: "gevent" +google_genereativeai: + extends: .test_base_riot_snapshot + variables: + SUITE_NAME: "google_genereativeai" + graphene: extends: .test_base_riot_snapshot variables: diff --git a/.riot/requirements/1025297.txt b/.riot/requirements/1025297.txt new file mode 100644 index 00000000000..8f793f5b156 --- /dev/null +++ b/.riot/requirements/1025297.txt @@ -0,0 +1,50 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1025297.in +# +annotated-types==0.7.0 +attrs==24.2.0 +cachetools==5.5.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +exceptiongroup==1.2.2 +google-ai-generativelanguage==0.6.6 +google-api-core[grpc]==2.19.2 +google-api-python-client==2.144.0 +google-auth==2.34.0 +google-auth-httplib2==0.2.0 +google-generativeai==0.7.2 +googleapis-common-protos==1.65.0 +grpcio==1.66.1 +grpcio-status==1.62.3 +httplib2==0.22.0 +hypothesis==6.45.0 +idna==3.8 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pillow==10.4.0 +pluggy==1.5.0 +proto-plus==1.24.0 +protobuf==4.25.4 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pydantic==2.9.1 +pydantic-core==2.23.3 +pyparsing==3.1.4 +pytest==8.3.2 +pytest-asyncio==0.24.0 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +rsa==4.9 +sortedcontainers==2.4.0 +tomli==2.0.1 +tqdm==4.66.5 +typing-extensions==4.12.2 +uritemplate==4.1.1 +urllib3==2.2.2 diff --git a/.riot/requirements/1736f46.txt b/.riot/requirements/1736f46.txt new file mode 100644 index 00000000000..0ff3f25b45c --- /dev/null +++ b/.riot/requirements/1736f46.txt @@ -0,0 +1,48 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1736f46.in +# +annotated-types==0.7.0 +attrs==24.2.0 +cachetools==5.5.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +google-ai-generativelanguage==0.6.6 +google-api-core[grpc]==2.19.2 +google-api-python-client==2.144.0 +google-auth==2.34.0 +google-auth-httplib2==0.2.0 +google-generativeai==0.7.2 +googleapis-common-protos==1.65.0 +grpcio==1.66.1 +grpcio-status==1.62.3 +httplib2==0.22.0 +hypothesis==6.45.0 +idna==3.8 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pillow==10.4.0 +pluggy==1.5.0 +proto-plus==1.24.0 +protobuf==4.25.4 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pydantic==2.9.1 +pydantic-core==2.23.3 +pyparsing==3.1.4 +pytest==8.3.2 +pytest-asyncio==0.24.0 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +rsa==4.9 +sortedcontainers==2.4.0 +tqdm==4.66.5 +typing-extensions==4.12.2 +uritemplate==4.1.1 +urllib3==2.2.2 diff --git a/.riot/requirements/1b72277.txt b/.riot/requirements/1b72277.txt new file mode 100644 index 00000000000..d30449481ba --- /dev/null +++ b/.riot/requirements/1b72277.txt @@ -0,0 +1,50 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1b72277.in +# +annotated-types==0.7.0 +attrs==24.2.0 +cachetools==5.5.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +exceptiongroup==1.2.2 +google-ai-generativelanguage==0.6.6 +google-api-core[grpc]==2.19.2 +google-api-python-client==2.144.0 +google-auth==2.34.0 +google-auth-httplib2==0.2.0 +google-generativeai==0.7.2 
+googleapis-common-protos==1.65.0 +grpcio==1.66.1 +grpcio-status==1.62.3 +httplib2==0.22.0 +hypothesis==6.45.0 +idna==3.8 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pillow==10.4.0 +pluggy==1.5.0 +proto-plus==1.24.0 +protobuf==4.25.4 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pydantic==2.9.1 +pydantic-core==2.23.3 +pyparsing==3.1.4 +pytest==8.3.2 +pytest-asyncio==0.24.0 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +rsa==4.9 +sortedcontainers==2.4.0 +tomli==2.0.1 +tqdm==4.66.5 +typing-extensions==4.12.2 +uritemplate==4.1.1 +urllib3==2.2.2 diff --git a/.riot/requirements/9b83479.txt b/.riot/requirements/9b83479.txt new file mode 100644 index 00000000000..5b6bf3efb1e --- /dev/null +++ b/.riot/requirements/9b83479.txt @@ -0,0 +1,48 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/9b83479.in +# +annotated-types==0.7.0 +attrs==24.2.0 +cachetools==5.5.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +google-ai-generativelanguage==0.6.6 +google-api-core[grpc]==2.19.2 +google-api-python-client==2.144.0 +google-auth==2.34.0 +google-auth-httplib2==0.2.0 +google-generativeai==0.7.2 +googleapis-common-protos==1.65.0 +grpcio==1.66.1 +grpcio-status==1.62.3 +httplib2==0.22.0 +hypothesis==6.45.0 +idna==3.8 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pillow==10.4.0 +pluggy==1.5.0 +proto-plus==1.24.0 +protobuf==4.25.4 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pydantic==2.9.1 +pydantic-core==2.23.3 +pyparsing==3.1.4 +pytest==8.3.2 +pytest-asyncio==0.24.0 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +rsa==4.9 +sortedcontainers==2.4.0 +tqdm==4.66.5 +typing-extensions==4.12.2 +uritemplate==4.1.1 +urllib3==2.2.2 diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py index d23467b7804..9ef3d2a96dd 100644 --- a/ddtrace/contrib/internal/google_generativeai/_utils.py +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -71,8 +71,13 @@ def _extract_api_key(instance): def _tag_request_content_part(span, integration, part, part_idx, content_idx): """Tag the generation span with request content parts.""" text = getattr(part, "text", "") - span.set_tag_str("genai.request.contents.%d.parts.%d.text" % (content_idx, part_idx), integration.trunc(str(text))) function_call = getattr(part, "function_call", None) + function_response = getattr(part, "function_response", None) + if isinstance(part, dict): + text = part.get("text", "") + function_call = part.get("function_call", None) # TODO: CHECK FOR DICT FUNCTION_CALL/FUNCTION_RESPONSE TYPE + function_response = part.get("function_response", None) + span.set_tag_str("genai.request.contents.%d.parts.%d.text" % (content_idx, part_idx), integration.trunc(str(text))) if function_call: function_call_dict = type(function_call).to_dict(function_call) span.set_tag_str( @@ -83,7 +88,6 @@ def _tag_request_content_part(span, integration, part, part_idx, content_idx): "genai.request.contents.%d.parts.%d.function_call.args" % (content_idx, part_idx), integration.trunc(str(function_call_dict.get("args", {}))), ) - function_response = getattr(part, "function_response", None) if function_response: function_response_dict = type(function_response).to_dict(function_response) span.set_tag_str( @@ -105,14 +109,20 @@ def _tag_request_content(span, integration, content, content_idx): role = content.get("role", "") if 
role: span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(content.get("role", ""))) - span.set_tag_str( - "genai.request.contents.%d.parts" % content_idx, integration.trunc(str(content.get("parts", []))) - ) + parts = content.get("parts", []) + for part_idx, part in enumerate(parts): + _tag_request_content_part(span, integration, part, part_idx, content_idx) return role = getattr(content, "role", "") if role: span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(role)) parts = getattr(content, "parts", []) + if not parts: + span.set_tag_str( + "genai.request.contents.%d.text" % content_idx, + integration.trunc("[Non-text content object: {}]".format(repr(content))) + ) + return for part_idx, part in enumerate(parts): _tag_request_content_part(span, integration, part, part_idx, content_idx) @@ -143,7 +153,8 @@ def tag_request(span, integration, instance, args, kwargs): """ contents = get_argument_value(args, kwargs, 0, "contents") generation_config = kwargs.get("generation_config", {}) - system_instruction = getattr(instance, "_system_instruction", "") + system_instruction = getattr(instance, "_system_instruction", None) + stream = kwargs.get("stream", None) generation_config_dict = None if isinstance(generation_config, dict): @@ -154,12 +165,17 @@ def tag_request(span, integration, instance, args, kwargs): for k, v in generation_config_dict.items(): span.set_tag_str("genai.request.generation_config.%s" % k, str(v)) + if stream: + span.set_tag("genai.request.stream", True) + span.set_tag_str("genai.request.model", str(_extract_model_name(instance))) if not integration.is_pc_sampled_span(span): return - span.set_tag("genai.request.system_instruction", integration.trunc(system_instruction)) + if system_instruction: + for idx, part in enumerate(system_instruction.parts): + span.set_tag_str("genai.request.system_instruction.%d.text" % idx, integration.trunc(str(part.text))) if isinstance(contents, str): span.set_tag_str("genai.request.contents.0.text", integration.trunc(contents)) diff --git a/riotfile.py b/riotfile.py index d31956f961b..c0f196a92f9 100644 --- a/riotfile.py +++ b/riotfile.py @@ -2669,6 +2669,16 @@ def select_pys(min_version=MIN_PYTHON_VERSION, max_version=MAX_PYTHON_VERSION): "anthropic": [latest, "~=0.28"], }, ), + Venv( + name="google_generativeai", + command="pytest {cmdargs} tests/contrib/google_generativeai", + pys=select_pys(min_version="3.9"), + pkgs={ + "pytest-asyncio": latest, + "google-generativeai": ["~=0.7.2"], + "pillow": latest, + } + ), Venv( name="logbook", pys=select_pys(), diff --git a/tests/contrib/google_generativeai/__init__.py b/tests/contrib/google_generativeai/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/contrib/google_generativeai/conftest.py b/tests/contrib/google_generativeai/conftest.py new file mode 100644 index 00000000000..fe7996f0e66 --- /dev/null +++ b/tests/contrib/google_generativeai/conftest.py @@ -0,0 +1,70 @@ +import os + +import pytest + +from ddtrace.pin import Pin +from ddtrace.contrib.google_generativeai import patch +from ddtrace.contrib.google_generativeai import unpatch +from tests.contrib.google_generativeai.utils import MockGenerativeModelClient +from tests.contrib.google_generativeai.utils import MockGenerativeModelAsyncClient +from tests.utils import DummyTracer +from tests.utils import DummyWriter +from tests.utils import override_config +from tests.utils import override_env +from tests.utils import override_global_config + + +def default_global_config(): + 
return {"_dd_api_key": ""} + + +@pytest.fixture +def ddtrace_global_config(): + return {} + + +@pytest.fixture +def ddtrace_config_google_generativeai(): + return {} + + +@pytest.fixture +def mock_tracer(ddtrace_global_config, genai): + try: + pin = Pin.get_from(genai) + mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) + pin.override(genai, tracer=mock_tracer) + pin.tracer.configure() + yield mock_tracer + except Exception: + yield + + +@pytest.fixture +def mock_client(): + yield MockGenerativeModelClient() + + +@pytest.fixture +def mock_client_async(): + yield MockGenerativeModelAsyncClient() + + +@pytest.fixture +def genai(ddtrace_global_config, ddtrace_config_google_generativeai, mock_client, mock_client_async): + global_config = default_global_config() + global_config.update(ddtrace_global_config) + with override_global_config(global_config): + with override_config("google_generativeai", ddtrace_config_google_generativeai): + with override_env( + dict(GOOGLE_GENERATIVEAI_API_KEY=os.getenv("GOOGLE_GENERATIVEAI_API_KEY", "")) + ): + patch() + from google.generativeai import client as client_lib + import google.generativeai as genai + + client_lib._client_manager.clients["generative"] = mock_client + client_lib._client_manager.clients["generative_async"] = mock_client_async + + yield genai + unpatch() diff --git a/tests/contrib/google_generativeai/test_data/apple.jpg b/tests/contrib/google_generativeai/test_data/apple.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f921762ae0716fd8a21b0e4f00751a7d73ac1f52 GIT binary patch literal 42568 zcmcG!2Ut^0*Df3ol-{L_^d36WQF;j=gc>@8P(rAo7Yj`~(z{6SEl4j)mo8nZ2vS9= zR1r?p=Y8Ji{l4>E=l`#Bow)*;%&c|aYpq#(X7AZ^^YLZ{K%%a!rVPM97hVir0N~~` z0IuZYU<&}Ksc{2v006)(01*ZzfDv62LjTcXU;{AF-(d6~-CuQMbp2OxhuJV%xw;}? 
[... base85-encoded GIT binary patch data for tests/contrib/google_generativeai/test_data/apple.jpg omitted ...]
z-D(k9$p$b)7k5>}>O4m~d`%((A@L9~tG#H6WUT7<8?AFNlvX*YX~uer?XObDK}EVC zwu0EN0`~z=qyp`?)D%TC-xC7&UEXCvoN)-&Y4a|RX?2oS4Yp;Cfdh9P4KEV#!-Z;s z019Qlx^64RCBsf*wKgHdSRjWZ%%^I+*VcPX5tQC)IBOTAGjKprH)P9EE``pF7d1G> z1BejX>dJ$Vhd7p4P~r}mWnbuKS_dYWBIIcBz;RvwKcf7n-~)6K8RiBF?V<34_tE)5O=t&8zm1k^D-37zO}-NntAcZ@`w_0OPo#;{+XwCq>5ZT*}QITm|L>mQhb8h}R7) z6Wg|Nsb!7!{%qGE;#&fT-Zc@HNHr4{%Ea+4!0I}M0cO)B)?%|Ln7#caF3q6+@oJ;tt1td zUmv^_-9*e_+ZR}e)h!1h@X^SgD zs_LHN{7*h(wX0N0<;*j2TiRYDOE=Oo^ohnC%F9de!rT|iHf;(%vCMXQjfHfT0HBvk zi}Nb;%yMX4WsJC4iEYtnX1IcaHPm7(Up2V>F0PX>LWO6PgVB!Rrg$66@hMi;xq>!> zhlrC@=32qbqyp9hKUmh8PBJj_}k9%~=OaUn|knrp;YTS`)Rv6%f~Na_!DyHvk*V&;%yimJ1d8 zj&D{2v@Y%Exj;K~cM8R6S91jXJSIf28IsX%gZjLJGWg1eo=Ea#b8*W6XCMD14@ zMhF_WmKnDEF^Nr^UUvl@tq>zgl2YNQ z)+o(Pu%fdZdGkH}V~~{TbqZn46nKD0vkA!!2y^OTs8e==6cu+6eTlrwB_RILI=BLk z_k4Pn(4)JHhgTwVHApK1GXg^#v1C>SRJdqonO5k43#Hf0si0>aLhRkKv4|Awsc?a# zm>O}2(Eznti$z+OQda}O%20=vY%LCZdLo!oLxLt8<0pal* z8+(~j3s{1xQL7q-n*)$EaWupBj*8!yEMnJKfeaOJ2&JX1)Y&MnBrTAxvx!R^a10P> zkM$D;68BJ|1G>1X)(dvub0sg`?p?k6fpjL0U}Kn+3-J*GEUT>k&}=B%f#wl3A7A_s zx>`mrOZ2zH<`W8U?J5U!P|O)u3>#9b6BF#=EM*&Nq0%X_%t?K-7>TwAg_nL(jITXK zNDb176dq-)9tbt4*D}be)+S;F-ziL-0k<;AzqGn3@{Fh{(_|Y;I)ZNY^8>Y^-P{zw z?99u8k1JBnZSftpyk_9CQQgcM7-w5ydVxaXEvoYlTQR}ZH6^Dl{iWiC23JJ7892lU zg$4fbAdAm){{V(B%kp_8RCEJ=A26ONH|9`kHHk_M3%;f0H|jdLGnkmU#KLK-$7C7F zHvaQ$Zds_z2gUab*gE1jUT?k12(fsqLflFQ@Jh3AS3%{NK+=i=0$4McJVAb(Fw;h0 zwdBGL(Y!%w6D4j~-)*ETxMkk{iv1e!`G#zF9ipo?Wa%<1OFrUam$1&`mjqn*6jx&0sbcNRZ^2Unn2 z#s$L}efuIiF~~@l6`;IzAF5oeiLrrbiKL3vd5X0HI_+%%hVG>y= zKJ_eGtrvCC#12_ag>Etg98?hqHs0nsnYj5H?{BQNk?EUYL);q*54dOtfMQaq+$pFz z#${-$pqarcxTqB@!f-)0g2MC-z)>wSy12s9+%AdT2V~SBwQg7&P?Rq+gQpA&V-NsV z-i{@q2i#l>z6by!jQ%7be?G;F@TmMcxtp^aIgF?to=DccpLms(Q;A@@#6~u|+)FVU zWWMGTElNwJFkGb?tJJ$|1;(0>IEP3(E>~fhcPRiHoZvr=00LOq} z{&|Wh{{Rzc*@$N`kQ*?Fy7k00&zj)-A|0G5Se~ zKpjEuE8H=*;e_KTy!(O3=u|PEG=N$b!Yo>J%y}l)5~#3)M_wh4leu6?ZmWqxGdbmt zSBUWn3P0&AA^FxMAMu&+O}L&J;GP+SUyn3FlsP{2E!p*hZ9Bxi;4Y6;!vV6r<|6WI z%o>qln>d4w=%W>JP=u1dZuKOs%G4xQBN^w3-Kut^q;FAhTrrr zz#v2OtVo~nY`XD0bHx(%8>qaIdY(A_Md};GQ&IJ8*NM>mrB(4PcN_8aiG#tH7C(f= zRQ)2pEMuLY6HxwQwE96RD&jX&!#ojso)5tB&llo^1P{;DNd6?diQ}Fb;+`4eo+*D5 z#V?Np@cs+I6R5pK^%vB<4h0Dpe+cnMAAqNcM0lkfPaN@29IJ`q{{Vs{1h4!mi5O$U zJRgDKo(bTd82C>Fd?O^lK*D;CKZt<<{0hE51xFLX2dD=V!#oqiJQKtC1QI|>mHz;X zQ6mhn$|K_+82Co~N5Vc4@Q;i~Qk_c0dX?%2s2-p?hl~eM9YFOD2zbNB9uPf32_mIR z{{YIVQojQa;Xm_XPNjO4>Qkvsr8<=AQ>k8|" + + +@pytest.mark.snapshot +def test_gemini_completion(genai, mock_client): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_1)) + llm = genai.GenerativeModel('gemini-1.5-flash') + llm.generate_content( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion", + ignores=["resource"], +) +async def test_gemini_completion_async(genai, mock_client_async): + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_1)) + llm = genai.GenerativeModel('gemini-1.5-flash') + await llm.generate_content_async( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + + +@pytest.mark.snapshot +def test_gemini_completion_error(genai, mock_client): + llm = genai.GenerativeModel('gemini-1.5-flash') + llm._client = mock.Mock() + 
llm._client.generate_content.side_effect = InvalidArgument("Invalid API key. Please pass a valid API key.") + with pytest.raises(InvalidArgument): + llm.generate_content( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error", + ignores=["resource", "meta.error.stack"], +) +async def test_gemini_completion_error_async(genai, mock_client): + llm = genai.GenerativeModel('gemini-1.5-flash') + llm._async_client = mock.Mock() + llm._async_client.generate_content.side_effect = InvalidArgument("Invalid API key. Please pass a valid API key.") + with pytest.raises(InvalidArgument): + await llm.generate_content_async( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + ) + + +@pytest.mark.snapshot +def test_gemini_completion_multiple_messages(genai, mock_client): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) + llm = genai.GenerativeModel('gemini-1.5-flash') + llm.generate_content( + [ + {'role': 'user', 'parts': [{"text": 'Hello world!'}]}, + {'role': 'model', 'parts': [{"text": 'Great to meet you. What would you like to know?'}]}, + {'role': 'user', 'parts': [{"text": 'Why is the sky blue?'}]}, + ], + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages", + ignores=["resource"], +) +async def test_gemini_completion_multiple_messages_async(genai, mock_client_async): + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) + llm = genai.GenerativeModel('gemini-1.5-flash') + await llm.generate_content_async( + [ + {'role': 'user', 'parts': [{"text": 'Hello world!'}]}, + {'role': 'model', 'parts': [{"text": 'Great to meet you. What would you like to know?'}]}, + {'role': 'user', 'parts': [{"text": 'Why is the sky blue?'}]}, + ], + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages", + ignores=[ # send_message does not include all config options by default + "meta.genai.request.generation_config.candidate_count", + "meta.genai.request.generation_config.top_k", + "meta.genai.request.generation_config.top_p", + "meta.genai.request.generation_config.response_mime_type", + "meta.genai.request.generation_config.response_schema", + ] +) +def test_gemini_chat_completion(genai, mock_client): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) + llm = genai.GenerativeModel('gemini-1.5-flash') + chat = llm.start_chat( + history=[ + {"role": "user", "parts": "Hello world!"}, + {"role": "model", "parts": "Great to meet you. 
What would you like to know?"}, + ] + ) + chat.send_message( + "Why is the sky blue?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages", + ignores=[ # send_message does not include all config options by default + "resource", + "meta.genai.request.generation_config.candidate_count", + "meta.genai.request.generation_config.top_k", + "meta.genai.request.generation_config.top_p", + "meta.genai.request.generation_config.response_mime_type", + "meta.genai.request.generation_config.response_schema", + ] +) +async def test_gemini_chat_completion_async(genai, mock_client_async): + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) + llm = genai.GenerativeModel('gemini-1.5-flash') + chat = llm.start_chat( + history=[ + {"role": "user", "parts": "Hello world!"}, + {"role": "model", "parts": "Great to meet you. What would you like to know?"}, + ] + ) + await chat.send_message_async( + "Why is the sky blue?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + + +@pytest.mark.snapshot +def test_gemini_completion_system_prompt(genai, mock_client): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_SYSTEM)) + llm = genai.GenerativeModel('gemini-1.5-flash', system_instruction="You are a die-hard Michael Jordan fan that always brings stats to the discussion.") + llm.generate_content( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt", + ignores=["resource"], +) +async def test_gemini_completion_system_prompt_async(genai, mock_client_async): + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_SYSTEM)) + llm = genai.GenerativeModel('gemini-1.5-flash', + system_instruction="You are a die-hard Michael Jordan fan that always brings stats to the discussion.") + await llm.generate_content_async( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + + +@pytest.mark.snapshot +def test_gemini_completion_stream(genai, mock_client): + mock_client.responses["stream_generate_content"] = [(_mock_completion_stream_chunk(chunk) for chunk in MOCK_COMPLETION_STREAM_CHUNKS)] + llm = genai.GenerativeModel('gemini-1.5-flash') + response = llm.generate_content( + "Can you recite the alphabet?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + stream=True, + ) + for _ in response: + pass + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream", + ignores=["resource"], +) +async def test_gemini_completion_stream_async(genai, mock_client_async): + mock_client_async.responses["stream_generate_content"] = [_async_streamed_response(MOCK_COMPLETION_STREAM_CHUNKS)] + llm = genai.GenerativeModel('gemini-1.5-flash') + response = await llm.generate_content_async( + "Can you recite the alphabet?", + 
generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + stream=True, + ) + async for _ in response: + pass + + +@pytest.mark.snapshot +def test_gemini_tool_completion(genai, mock_client): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_TOOL_CALL)) + llm = genai.GenerativeModel('gemini-1.5-flash', tools=[set_light_values]) + llm.generate_content( + "Dim the lights so the room feels cozy and warm.", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion", + ignores=["resource"], +) +async def test_gemini_tool_completion_async(genai, mock_client_async): + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_TOOL_CALL)) + llm = genai.GenerativeModel('gemini-1.5-flash', tools=[set_light_values]) + await llm.generate_content_async( + "Dim the lights so the room feels cozy and warm.", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + ) + + +@pytest.mark.snapshot +def test_gemini_tool_chat_completion(genai, mock_client): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_TOOL_CALL)) + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_CHAT_COMPLETION_TOOL_RESPONSE)) + model = genai.GenerativeModel(model_name="gemini-1.5-flash", tools=[set_light_values]) + chat = model.start_chat() + chat.send_message("Dim the lights so the room feels cozy and warm.") + response_parts = [ + genai.protos.Part(function_response=genai.protos.FunctionResponse(name="set_light_values", response={"result": {"brightness": 50, "color_temperature": "warm"}})) + ] + chat.send_message(response_parts) + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion", + ignores=["resource"], +) +async def test_gemini_tool_chat_completion_async(genai, mock_client_async): + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_TOOL_CALL)) + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_CHAT_COMPLETION_TOOL_RESPONSE)) + model = genai.GenerativeModel(model_name="gemini-1.5-flash", tools=[set_light_values]) + chat = model.start_chat() + await chat.send_message_async("Dim the lights so the room feels cozy and warm.") + response_parts = [ + genai.protos.Part(function_response=genai.protos.FunctionResponse(name="set_light_values", response={"result": {"brightness": 50, "color_temperature": "warm"}})) + ] + await chat.send_message_async(response_parts) + + +@pytest.mark.snapshot +def test_gemini_completion_tool_stream(genai, mock_client): + mock_client.responses["stream_generate_content"] = [(_mock_completion_stream_chunk(chunk) for chunk in MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS)] + llm = genai.GenerativeModel('gemini-1.5-flash', tools=[set_light_values]) + response = llm.generate_content( + "Dim the lights so the room feels cozy and warm.", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + stream=True + ) + for _ in response: + pass + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream", + 
ignores=["resource"], +) +async def test_gemini_completion_tool_stream_async(genai, mock_client_async): + mock_client_async.responses["stream_generate_content"] = [_async_streamed_response(MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS)] + llm = genai.GenerativeModel('gemini-1.5-flash', tools=[set_light_values]) + response = await llm.generate_content_async( + "Dim the lights so the room feels cozy and warm.", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + stream=True + ) + async for _ in response: + pass + + +@pytest.mark.snapshot(ignores=["meta.genai.request.contents.0.text"]) +def test_gemini_completion_image(genai, mock_client): + """Ensure passing images to generate_content() won't break patching.""" + img = Image.open(os.path.join(os.path.dirname(__file__), "test_data/apple.jpg")) + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) + llm = genai.GenerativeModel('gemini-1.5-flash') + llm.generate_content( + [img, "Return a bounding box for the piranha. \n [ymin, xmin, ymax, xmax"], + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image", + ignores=["resource", "meta.genai.request.contents.0.text"] +) +async def test_gemini_completion_image_async(genai, mock_client_async): + """Ensure passing images to generate_content() won't break patching.""" + img = Image.open(os.path.join(os.path.dirname(__file__), "test_data/apple.jpg")) + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) + llm = genai.GenerativeModel('gemini-1.5-flash') + await llm.generate_content_async( + [img, "Return a bounding box for the piranha. 
\n [ymin, xmin, ymax, xmax"], + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + ) diff --git a/tests/contrib/google_generativeai/test_google_generativeai_patch.py b/tests/contrib/google_generativeai/test_google_generativeai_patch.py new file mode 100644 index 00000000000..470e292a303 --- /dev/null +++ b/tests/contrib/google_generativeai/test_google_generativeai_patch.py @@ -0,0 +1,24 @@ +from ddtrace.contrib.google_generativeai import get_version +from ddtrace.contrib.google_generativeai import patch +from ddtrace.contrib.google_generativeai import unpatch +from tests.contrib.patch import PatchTestCase + + +class TestGoogleGenerativeAIPatch(PatchTestCase.Base): + __integration_name__ = "google_generativeai" + __module_name__ = "google.generativeai" + __patch_func__ = patch + __unpatch_func__ = unpatch + __get_version__ = get_version + + def assert_module_patched(self, genai): + self.assert_wrapped(genai.GenerativeModel.generate_content) + self.assert_wrapped(genai.GenerativeModel.generate_content_async) + + def assert_not_module_patched(self, genai): + self.assert_not_wrapped(genai.GenerativeModel.generate_content) + self.assert_not_wrapped(genai.GenerativeModel.generate_content_async) + + def assert_not_module_double_patched(self, genai): + self.assert_not_double_wrapped(genai.GenerativeModel.generate_content) + self.assert_not_double_wrapped(genai.GenerativeModel.generate_content_async) diff --git a/tests/contrib/google_generativeai/utils.py b/tests/contrib/google_generativeai/utils.py new file mode 100644 index 00000000000..ecb75988edc --- /dev/null +++ b/tests/contrib/google_generativeai/utils.py @@ -0,0 +1,148 @@ +import collections + +import mock +from google.generativeai import protos + + +MOCK_COMPLETION_SIMPLE_1 = { + "candidates": [{"content": {"parts": [{"text": "The argument for LeBron James being the 'Greatest of All Time' (" + "GOAT) is multifaceted and involves a variety of factors. Here's a " + "breakdown"}], "role": "model"}, "finish_reason": 2}], + "usage_metadata": {"prompt_token_count": 12, "candidates_token_count": 30, "total_token_count": 42} +} +MOCK_COMPLETION_SIMPLE_2 = { + "candidates": [{"content": {"parts": [{"text": "The sky appears blue due to a phenomenon called **Rayleigh " + "scattering**. \nHere's how it works:* **Sunlight is made up of " + "all colors of the"}], + "role": "model"}, "finish_reason": 2}], + "usage_metadata": {"prompt_token_count": 24, "candidates_token_count": 35, "total_token_count": 59} +} +MOCK_COMPLETION_SIMPLE_SYSTEM = { + "candidates": [{"content": {"parts": [{"text": "Look, I respect LeBron James. He's a phenomenal player, " + "an incredible athlete, and a great ambassador for the game. 
But " + "when it comes to the GOAT, the crown belongs to His Airness, " + "Michael Jordan!"}], "role": "model"}, "finish_reason": 2}], + "usage_metadata": {"prompt_token_count": 29, "candidates_token_count": 45, "total_token_count": 74}, +} +MOCK_COMPLETION_STREAM_CHUNKS = ( + {"text": "A", "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 1, "total_token_count": 7}}, + {"text": ", B, C, D, E, F, G, H, I", + "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 17, "total_token_count": 23}}, + {"text": ", J, K, L, M, N, O, P, Q", + "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 33, "total_token_count": 39}}, + {"text": ", R, S, T, U, V, W, X, Y, Z.\n", + "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 52, "total_token_count": 58}}, +) +MOCK_COMPLETION_TOOL_CALL = { + "candidates": [ + { + "content": { + "parts": [ + { + "function_call": { + "name": "set_light_values", + "args": { + "fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}] + } + } + } + ], + "role": "model" + }, + "finish_reason": 2, + } + ], + "usage_metadata": {"prompt_token_count": 150, "candidates_token_count": 25, "total_token_count": 175} +} +MOCK_CHAT_COMPLETION_TOOL_RESPONSE = { + "candidates": [ + { + "content": { + "parts": [{"text": "OK. I've dimmed the lights to 50% and set the color temperature to warm. How's that? \n"}], + "role": "model", + }, + "finish_reason": 2, + }, + ], + "usage_metadata": {"prompt_token_count": 206, "candidates_token_count": 27, "total_token_count": 233} +} +MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS = ( + { + "function_call": {"name": "set_light_values", "args": {"fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}}, + "usage_metadata": {"prompt_token_count": 150, "candidates_token_count": 25, "total_token_count": 175}, + + }, +) +MOCK_COMPLETION_IMG_CALL = { + "candidates": [{"content": {"parts": [{"text": "57 100 900 911"}], "role": "model"}, "finish_reason": 2}], + "usage_metadata": {"prompt_token_count": 277, "candidates_token_count": 14, "total_token_count": 291} +} + + +class MockGenerativeModelClient: + def __init__(self): + self.responses = collections.defaultdict(list) + self._client_options = mock.Mock() + self._client_options.api_key = "" + + def generate_content(self, request, **kwargs): + return self.responses["generate_content"].pop(0) + + def stream_generate_content(self, request, **kwargs): + return self.responses["stream_generate_content"].pop(0) + + +class MockGenerativeModelAsyncClient: + def __init__(self): + self.responses = collections.defaultdict(list) + self._client = mock.Mock() + self._client_options = mock.Mock() + self._client._client_options = self._client_options + self._client_options.api_key = "" + + async def generate_content(self, request, **kwargs): + return self.responses["generate_content"].pop(0) + + async def stream_generate_content(self, request, **kwargs): + return self.responses["stream_generate_content"].pop(0) + + +def set_light_values(brightness, color_temp): + """Set the brightness and color temperature of a room light. (mock API). + Args: + brightness: Light level from 0 to 100. Zero is off and 100 is full brightness + color_temp: Color temperature of the light fixture, which can be `daylight`, `cool` or `warm`. + Returns: + A dictionary containing the set brightness and color temperature. 
+ """ + return { + "brightness": brightness, + "colorTemperature": color_temp + } + + +async def _async_streamed_response(mock_chunks): + """Return async streamed response chunks to be processed by the mock async client.""" + for chunk in mock_chunks: + yield _mock_completion_stream_chunk(chunk) + + +def _mock_completion_response(mock_completion_dict): + mock_content = protos.Content(mock_completion_dict["candidates"][0]["content"]) + return protos.GenerateContentResponse( + { + "candidates": [{"content": mock_content, "finish_reason": mock_completion_dict["candidates"][0]["finish_reason"]}], + "usage_metadata": mock_completion_dict["usage_metadata"], + } + ) + + +def _mock_completion_stream_chunk(chunk): + mock_content = None + if chunk.get("text"): + mock_content = protos.Content({"parts": [{"text": chunk["text"]}], "role": "model"}) + elif chunk.get("function_call"): + mock_content = protos.Content({"parts": [{"function_call": chunk["function_call"]}], "role": "model"}) + return protos.GenerateContentResponse( + {"candidates": [{"content": mock_content, "finish_reason": 2}], "usage_metadata": chunk["usage_metadata"]} + ) diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json new file mode 100644 index 00000000000..3dd7ab7c763 --- /dev/null +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json @@ -0,0 +1,43 @@ +[[ + { + "name": "gemini.request", + "service": "", + "resource": "GenerativeModel.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "66da21e900000000", + "genai.request.api_key": "...key>", + "genai.request.contents.0.text": "What is the argument for LeBron James being the GOAT?", + "genai.request.generation_config.candidate_count": "None", + "genai.request.generation_config.max_output_tokens": "35", + "genai.request.generation_config.response_mime_type": "None", + "genai.request.generation_config.response_schema": "None", + "genai.request.generation_config.stop_sequences": "['x']", + "genai.request.generation_config.temperature": "1.0", + "genai.request.generation_config.top_k": "None", + "genai.request.generation_config.top_p": "None", + "genai.request.model": "gemini-1.5-flash", + "genai.response.candidates.0.content.parts.0.text": "The argument for LeBron James being the 'Greatest of All Time' (GOAT) is multifaceted and involves a variety of factors. 
Here's ...", + "genai.response.candidates.0.content.role": "model", + "genai.response.candidates.0.finish_reason": "2", + "language": "python", + "runtime-id": "f88d6bc3be7e40949d80435242c79aee" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "genai.response.usage.completion_tokens": 30, + "genai.response.usage.prompt_tokens": 12, + "genai.response.usage.total_tokens": 42, + "process_id": 39040 + }, + "duration": 579239000, + "start": 1725571561366580000 + }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json new file mode 100644 index 00000000000..5959a2be298 --- /dev/null +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json @@ -0,0 +1,39 @@ +[[ + { + "name": "gemini.request", + "service": "", + "resource": "GenerativeModel.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 1, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "66db429700000000", + "error.message": "400 Invalid API key. Please pass a valid API key.", + "error.stack": "Traceback (most recent call last):\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/ddtrace/contrib/internal/google_generativeai/patch.py\", line 47, in traced_generate\n generations = func(*args, **kwargs)\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai~072/lib/python3.10/site-packages/google/generativeai/generative_models.py\", line 331, in generate_content\n response = self._client.generate_content(\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai~072/lib/python3.10/site-packages/mock/mock.py\", line 1178, in __call__\n return _mock_self._mock_call(*args, **kwargs)\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai~072/lib/python3.10/site-packages/mock/mock.py\", line 1182, in _mock_call\n return _mock_self._execute_mock_call(*args, **kwargs)\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai~072/lib/python3.10/site-packages/mock/mock.py\", line 1239, in _execute_mock_call\n raise effect\ngoogle.api_core.exceptions.InvalidArgument: 400 Invalid API key. 
Please pass a valid API key.\n", + "error.type": "google.api_core.exceptions.InvalidArgument", + "genai.request.contents.0.text": "What is the argument for LeBron James being the GOAT?", + "genai.request.generation_config.candidate_count": "None", + "genai.request.generation_config.max_output_tokens": "30", + "genai.request.generation_config.response_mime_type": "None", + "genai.request.generation_config.response_schema": "None", + "genai.request.generation_config.stop_sequences": "['x']", + "genai.request.generation_config.temperature": "1.0", + "genai.request.generation_config.top_k": "None", + "genai.request.generation_config.top_p": "None", + "genai.request.model": "gemini-1.5-flash", + "language": "python", + "runtime-id": "c20f6ea1fd834b0094c087a8dd7550ec" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 85884 + }, + "duration": 2204000, + "start": 1725645463502274000 + }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json new file mode 100644 index 00000000000..a641ad0225a --- /dev/null +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json @@ -0,0 +1,44 @@ +[[ + { + "name": "gemini.request", + "service": "", + "resource": "GenerativeModel.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "66df5b6b00000000", + "genai.request.api_key": "...key>", + "genai.request.contents.0.text": "[Non-text content object: ]", + "genai.request.contents.1.text": "Return a bounding box for the piranha. 
\\n [ymin, xmin, ymax, xmax", + "genai.request.generation_config.candidate_count": "None", + "genai.request.generation_config.max_output_tokens": "30", + "genai.request.generation_config.response_mime_type": "None", + "genai.request.generation_config.response_schema": "None", + "genai.request.generation_config.stop_sequences": "['x']", + "genai.request.generation_config.temperature": "1.0", + "genai.request.generation_config.top_k": "None", + "genai.request.generation_config.top_p": "None", + "genai.request.model": "gemini-1.5-flash", + "genai.response.candidates.0.content.parts.0.text": "57 100 900 911", + "genai.response.candidates.0.content.role": "model", + "genai.response.candidates.0.finish_reason": "2", + "language": "python", + "runtime-id": "ef31792639f64e3a94a13cb358079ca3" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "genai.response.usage.completion_tokens": 14, + "genai.response.usage.prompt_tokens": 277, + "genai.response.usage.total_tokens": 291, + "process_id": 20810 + }, + "duration": 4446000, + "start": 1725913963608949000 + }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json new file mode 100644 index 00000000000..f842fd1f8cd --- /dev/null +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json @@ -0,0 +1,48 @@ +[[ + { + "name": "gemini.request", + "service": "", + "resource": "GenerativeModel.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "66db5e4600000000", + "genai.request.api_key": "...key>", + "genai.request.contents.0.parts.0.text": "Hello world!", + "genai.request.contents.0.role": "user", + "genai.request.contents.1.parts.0.text": "Great to meet you. What would you like to know?", + "genai.request.contents.1.role": "model", + "genai.request.contents.2.parts.0.text": "Why is the sky blue?", + "genai.request.contents.2.role": "user", + "genai.request.generation_config.candidate_count": "None", + "genai.request.generation_config.max_output_tokens": "35", + "genai.request.generation_config.response_mime_type": "None", + "genai.request.generation_config.response_schema": "None", + "genai.request.generation_config.stop_sequences": "['x']", + "genai.request.generation_config.temperature": "1.0", + "genai.request.generation_config.top_k": "None", + "genai.request.generation_config.top_p": "None", + "genai.request.model": "gemini-1.5-flash", + "genai.response.candidates.0.content.parts.0.text": "The sky appears blue due to a phenomenon called **Rayleigh scattering**. 
\\nHere's how it works:* **Sunlight is made up of all co...", + "genai.response.candidates.0.content.role": "model", + "genai.response.candidates.0.finish_reason": "2", + "language": "python", + "runtime-id": "9efce87e47184fb8bec5228e67b84e90" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "genai.response.usage.completion_tokens": 35, + "genai.response.usage.prompt_tokens": 24, + "genai.response.usage.total_tokens": 59, + "process_id": 13297 + }, + "duration": 613000, + "start": 1725652550194481000 + }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json new file mode 100644 index 00000000000..e01780c0b92 --- /dev/null +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json @@ -0,0 +1,44 @@ +[[ + { + "name": "gemini.request", + "service": "", + "resource": "GenerativeModel.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "66df35a500000000", + "genai.request.api_key": "...key>", + "genai.request.contents.0.text": "Can you recite the alphabet?", + "genai.request.generation_config.candidate_count": "None", + "genai.request.generation_config.max_output_tokens": "35", + "genai.request.generation_config.response_mime_type": "None", + "genai.request.generation_config.response_schema": "None", + "genai.request.generation_config.stop_sequences": "['x']", + "genai.request.generation_config.temperature": "1.0", + "genai.request.generation_config.top_k": "None", + "genai.request.generation_config.top_p": "None", + "genai.request.model": "gemini-1.5-flash", + "genai.request.stream": "True", + "genai.response.candidates.0.content.parts.0.text": "A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z.\\n", + "genai.response.candidates.0.content.role": "model", + "genai.response.candidates.0.finish_reason": "2", + "language": "python", + "runtime-id": "ab34a9e677524b1b85ca63038b94e284" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "genai.response.usage.completion_tokens": 52, + "genai.response.usage.prompt_tokens": 6, + "genai.response.usage.total_tokens": 58, + "process_id": 85545 + }, + "duration": 1269000, + "start": 1725904293506230000 + }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json new file mode 100644 index 00000000000..973da5b3167 --- /dev/null +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json @@ -0,0 +1,44 @@ +[[ + { + "name": "gemini.request", + "service": "", + "resource": "GenerativeModel.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "66df2acb00000000", + "genai.request.api_key": "...key>", + "genai.request.contents.0.text": "What is the argument for LeBron James being the GOAT?", + "genai.request.generation_config.candidate_count": "None", + "genai.request.generation_config.max_output_tokens": "35", + 
"genai.request.generation_config.response_mime_type": "None", + "genai.request.generation_config.response_schema": "None", + "genai.request.generation_config.stop_sequences": "['x']", + "genai.request.generation_config.temperature": "1.0", + "genai.request.generation_config.top_k": "None", + "genai.request.generation_config.top_p": "None", + "genai.request.model": "gemini-1.5-flash", + "genai.request.system_instruction.0.text": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", + "genai.response.candidates.0.content.parts.0.text": "Look, I respect LeBron James. He's a phenomenal player, an incredible athlete, and a great ambassador for the game. But when it ...", + "genai.response.candidates.0.content.role": "model", + "genai.response.candidates.0.finish_reason": "2", + "language": "python", + "runtime-id": "14742cbdf7694bc5b250790e5a9985f1" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "genai.response.usage.completion_tokens": 45, + "genai.response.usage.prompt_tokens": 29, + "genai.response.usage.total_tokens": 74, + "process_id": 77455 + }, + "duration": 339000, + "start": 1725901515291773000 + }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json new file mode 100644 index 00000000000..490f8fa6acf --- /dev/null +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json @@ -0,0 +1,46 @@ +[[ + { + "name": "gemini.request", + "service": "", + "resource": "GenerativeModel.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "66df431200000000", + "genai.request.api_key": "...key>", + "genai.request.contents.0.text": "Dim the lights so the room feels cozy and warm.", + "genai.request.generation_config.candidate_count": "None", + "genai.request.generation_config.max_output_tokens": "30", + "genai.request.generation_config.response_mime_type": "None", + "genai.request.generation_config.response_schema": "None", + "genai.request.generation_config.stop_sequences": "['x']", + "genai.request.generation_config.temperature": "1.0", + "genai.request.generation_config.top_k": "None", + "genai.request.generation_config.top_p": "None", + "genai.request.model": "gemini-1.5-flash", + "genai.request.stream": "True", + "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'key': 'color_temp', 'value': 'warm'}, {'key': 'brightness', 'value': 50.0}]}", + "genai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", + "genai.response.candidates.0.content.parts.0.text": "", + "genai.response.candidates.0.content.role": "model", + "genai.response.candidates.0.finish_reason": "2", + "language": "python", + "runtime-id": "3188ecf703b5409ab3405dfd2c201aa6" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "genai.response.usage.completion_tokens": 25, + "genai.response.usage.prompt_tokens": 150, + "genai.response.usage.total_tokens": 175, + "process_id": 96885 + }, + "duration": 721000, + "start": 1725907730514245000 + }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json 
b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json new file mode 100644 index 00000000000..777b57a1d9a --- /dev/null +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json @@ -0,0 +1,82 @@ +[[ + { + "name": "gemini.request", + "service": "", + "resource": "GenerativeModel.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "66db74d900000000", + "genai.request.api_key": "...key>", + "genai.request.contents.0.parts.0.text": "Dim the lights so the room feels cozy and warm.", + "genai.request.contents.0.role": "user", + "genai.request.model": "gemini-1.5-flash", + "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'key': 'color_temp', 'value': 'warm'}, {'key': 'brightness', 'value': 50.0}]}", + "genai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", + "genai.response.candidates.0.content.parts.0.text": "", + "genai.response.candidates.0.content.role": "model", + "genai.response.candidates.0.finish_reason": "2", + "language": "python", + "runtime-id": "22733330fbbe48118bd9b9d1d58d6ee4" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "genai.response.usage.completion_tokens": 25, + "genai.response.usage.prompt_tokens": 150, + "genai.response.usage.total_tokens": 175, + "process_id": 35758 + }, + "duration": 569000, + "start": 1725658329842913000 + }], +[ + { + "name": "gemini.request", + "service": "", + "resource": "GenerativeModel.generate_content", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "66db74d900000000", + "genai.request.api_key": "...key>", + "genai.request.contents.0.parts.0.text": "Dim the lights so the room feels cozy and warm.", + "genai.request.contents.0.role": "user", + "genai.request.contents.1.parts.0.function_call.args": "{'fields': [{'key': 'color_temp', 'value': 'warm'}, {'key': 'brightness', 'value': 50.0}]}", + "genai.request.contents.1.parts.0.function_call.name": "set_light_values", + "genai.request.contents.1.parts.0.text": "", + "genai.request.contents.1.role": "model", + "genai.request.contents.2.parts.0.function_response.name": "set_light_values", + "genai.request.contents.2.parts.0.function_response.response": "{'result': {'color_temperature': 'warm', 'brightness': 50.0}}", + "genai.request.contents.2.parts.0.text": "", + "genai.request.contents.2.role": "user", + "genai.request.model": "gemini-1.5-flash", + "genai.response.candidates.0.content.parts.0.text": "OK. I've dimmed the lights to 50% and set the color temperature to warm. How's that? 
\\n", + "genai.response.candidates.0.content.role": "model", + "genai.response.candidates.0.finish_reason": "2", + "language": "python", + "runtime-id": "22733330fbbe48118bd9b9d1d58d6ee4" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "genai.response.usage.completion_tokens": 27, + "genai.response.usage.prompt_tokens": 206, + "genai.response.usage.total_tokens": 233, + "process_id": 35758 + }, + "duration": 397000, + "start": 1725658329845583000 + }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json new file mode 100644 index 00000000000..93c99f18d6f --- /dev/null +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json @@ -0,0 +1,45 @@ +[[ + { + "name": "gemini.request", + "service": "", + "resource": "GenerativeModel.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "66db701d00000000", + "genai.request.api_key": "...key>", + "genai.request.contents.0.text": "Dim the lights so the room feels cozy and warm.", + "genai.request.generation_config.candidate_count": "None", + "genai.request.generation_config.max_output_tokens": "30", + "genai.request.generation_config.response_mime_type": "None", + "genai.request.generation_config.response_schema": "None", + "genai.request.generation_config.stop_sequences": "['x']", + "genai.request.generation_config.temperature": "1.0", + "genai.request.generation_config.top_k": "None", + "genai.request.generation_config.top_p": "None", + "genai.request.model": "gemini-1.5-flash", + "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'key': 'color_temp', 'value': 'warm'}, {'key': 'brightness', 'value': 50.0}]}", + "genai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", + "genai.response.candidates.0.content.parts.0.text": "", + "genai.response.candidates.0.content.role": "model", + "genai.response.candidates.0.finish_reason": "2", + "language": "python", + "runtime-id": "50c5d175cda64dabac9f8662880d7ca5" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "genai.response.usage.completion_tokens": 25, + "genai.response.usage.prompt_tokens": 150, + "genai.response.usage.total_tokens": 175, + "process_id": 31686 + }, + "duration": 627000, + "start": 1725657117233350000 + }]] From ccc3349f737ebe3f3749f0eee329c512c057da9d Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Mon, 9 Sep 2024 16:49:23 -0400 Subject: [PATCH 07/32] fmt --- .../internal/google_generativeai/_utils.py | 2 +- riotfile.py | 2 +- tests/contrib/google_generativeai/conftest.py | 6 +- .../test_google_generativeai.py | 107 ++++++++++------- tests/contrib/google_generativeai/utils.py | 112 ++++++++++++------ 5 files changed, 147 insertions(+), 82 deletions(-) diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py index 9ef3d2a96dd..0164984dda5 100644 --- a/ddtrace/contrib/internal/google_generativeai/_utils.py +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -120,7 +120,7 @@ def _tag_request_content(span, integration, content, content_idx): if not parts: span.set_tag_str( 
"genai.request.contents.%d.text" % content_idx, - integration.trunc("[Non-text content object: {}]".format(repr(content))) + integration.trunc("[Non-text content object: {}]".format(repr(content))), ) return for part_idx, part in enumerate(parts): diff --git a/riotfile.py b/riotfile.py index c0f196a92f9..1b358cb641f 100644 --- a/riotfile.py +++ b/riotfile.py @@ -2677,7 +2677,7 @@ def select_pys(min_version=MIN_PYTHON_VERSION, max_version=MAX_PYTHON_VERSION): "pytest-asyncio": latest, "google-generativeai": ["~=0.7.2"], "pillow": latest, - } + }, ), Venv( name="logbook", diff --git a/tests/contrib/google_generativeai/conftest.py b/tests/contrib/google_generativeai/conftest.py index fe7996f0e66..1a4a5c057a6 100644 --- a/tests/contrib/google_generativeai/conftest.py +++ b/tests/contrib/google_generativeai/conftest.py @@ -2,11 +2,11 @@ import pytest -from ddtrace.pin import Pin from ddtrace.contrib.google_generativeai import patch from ddtrace.contrib.google_generativeai import unpatch -from tests.contrib.google_generativeai.utils import MockGenerativeModelClient +from ddtrace.pin import Pin from tests.contrib.google_generativeai.utils import MockGenerativeModelAsyncClient +from tests.contrib.google_generativeai.utils import MockGenerativeModelClient from tests.utils import DummyTracer from tests.utils import DummyWriter from tests.utils import override_config @@ -60,8 +60,8 @@ def genai(ddtrace_global_config, ddtrace_config_google_generativeai, mock_client dict(GOOGLE_GENERATIVEAI_API_KEY=os.getenv("GOOGLE_GENERATIVEAI_API_KEY", "")) ): patch() - from google.generativeai import client as client_lib import google.generativeai as genai + from google.generativeai import client as client_lib client_lib._client_manager.clients["generative"] = mock_client client_lib._client_manager.clients["generative_async"] = mock_client_async diff --git a/tests/contrib/google_generativeai/test_google_generativeai.py b/tests/contrib/google_generativeai/test_google_generativeai.py index 6e02d08a849..efc1ee96f8d 100644 --- a/tests/contrib/google_generativeai/test_google_generativeai.py +++ b/tests/contrib/google_generativeai/test_google_generativeai.py @@ -1,14 +1,10 @@ import os -from PIL import Image +from google.api_core.exceptions import InvalidArgument import mock +from PIL import Image import pytest -from google.api_core.exceptions import InvalidArgument -from tests.utils import override_global_config -from tests.contrib.google_generativeai.utils import _async_streamed_response -from tests.contrib.google_generativeai.utils import _mock_completion_response -from tests.contrib.google_generativeai.utils import _mock_completion_stream_chunk from tests.contrib.google_generativeai.utils import MOCK_CHAT_COMPLETION_TOOL_RESPONSE from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_IMG_CALL from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_SIMPLE_1 @@ -17,7 +13,11 @@ from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_STREAM_CHUNKS from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_TOOL_CALL from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS +from tests.contrib.google_generativeai.utils import _async_streamed_response +from tests.contrib.google_generativeai.utils import _mock_completion_response +from tests.contrib.google_generativeai.utils import _mock_completion_stream_chunk from tests.contrib.google_generativeai.utils import set_light_values +from tests.utils import override_global_config def 
test_global_tags(genai, mock_client, mock_tracer): @@ -28,7 +28,7 @@ def test_global_tags(genai, mock_client, mock_tracer): The version should be used for all data """ mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_1)) - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") with override_global_config(dict(service="test-svc", env="staging", version="1234")): llm.generate_content( "What is the argument for LeBron James being the GOAT?", @@ -47,7 +47,7 @@ def test_global_tags(genai, mock_client, mock_tracer): @pytest.mark.snapshot def test_gemini_completion(genai, mock_client): mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_1)) - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") llm.generate_content( "What is the argument for LeBron James being the GOAT?", generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), @@ -60,7 +60,7 @@ def test_gemini_completion(genai, mock_client): ) async def test_gemini_completion_async(genai, mock_client_async): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_1)) - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") await llm.generate_content_async( "What is the argument for LeBron James being the GOAT?", generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), @@ -69,7 +69,7 @@ async def test_gemini_completion_async(genai, mock_client_async): @pytest.mark.snapshot def test_gemini_completion_error(genai, mock_client): - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") llm._client = mock.Mock() llm._client.generate_content.side_effect = InvalidArgument("Invalid API key. Please pass a valid API key.") with pytest.raises(InvalidArgument): @@ -84,7 +84,7 @@ def test_gemini_completion_error(genai, mock_client): ignores=["resource", "meta.error.stack"], ) async def test_gemini_completion_error_async(genai, mock_client): - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") llm._async_client = mock.Mock() llm._async_client.generate_content.side_effect = InvalidArgument("Invalid API key. Please pass a valid API key.") with pytest.raises(InvalidArgument): @@ -97,12 +97,12 @@ async def test_gemini_completion_error_async(genai, mock_client): @pytest.mark.snapshot def test_gemini_completion_multiple_messages(genai, mock_client): mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") llm.generate_content( [ - {'role': 'user', 'parts': [{"text": 'Hello world!'}]}, - {'role': 'model', 'parts': [{"text": 'Great to meet you. What would you like to know?'}]}, - {'role': 'user', 'parts': [{"text": 'Why is the sky blue?'}]}, + {"role": "user", "parts": [{"text": "Hello world!"}]}, + {"role": "model", "parts": [{"text": "Great to meet you. 
What would you like to know?"}]}, + {"role": "user", "parts": [{"text": "Why is the sky blue?"}]}, ], generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), ) @@ -114,12 +114,12 @@ def test_gemini_completion_multiple_messages(genai, mock_client): ) async def test_gemini_completion_multiple_messages_async(genai, mock_client_async): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") await llm.generate_content_async( [ - {'role': 'user', 'parts': [{"text": 'Hello world!'}]}, - {'role': 'model', 'parts': [{"text": 'Great to meet you. What would you like to know?'}]}, - {'role': 'user', 'parts': [{"text": 'Why is the sky blue?'}]}, + {"role": "user", "parts": [{"text": "Hello world!"}]}, + {"role": "model", "parts": [{"text": "Great to meet you. What would you like to know?"}]}, + {"role": "user", "parts": [{"text": "Why is the sky blue?"}]}, ], generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), ) @@ -133,11 +133,11 @@ async def test_gemini_completion_multiple_messages_async(genai, mock_client_asyn "meta.genai.request.generation_config.top_p", "meta.genai.request.generation_config.response_mime_type", "meta.genai.request.generation_config.response_schema", - ] + ], ) def test_gemini_chat_completion(genai, mock_client): mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") chat = llm.start_chat( history=[ {"role": "user", "parts": "Hello world!"}, @@ -159,11 +159,11 @@ def test_gemini_chat_completion(genai, mock_client): "meta.genai.request.generation_config.top_p", "meta.genai.request.generation_config.response_mime_type", "meta.genai.request.generation_config.response_schema", - ] + ], ) async def test_gemini_chat_completion_async(genai, mock_client_async): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") chat = llm.start_chat( history=[ {"role": "user", "parts": "Hello world!"}, @@ -179,7 +179,10 @@ async def test_gemini_chat_completion_async(genai, mock_client_async): @pytest.mark.snapshot def test_gemini_completion_system_prompt(genai, mock_client): mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_SYSTEM)) - llm = genai.GenerativeModel('gemini-1.5-flash', system_instruction="You are a die-hard Michael Jordan fan that always brings stats to the discussion.") + llm = genai.GenerativeModel( + "gemini-1.5-flash", + system_instruction="You are a die-hard Michael Jordan fan that always brings stats to the discussion.", + ) llm.generate_content( "What is the argument for LeBron James being the GOAT?", generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), @@ -192,8 +195,10 @@ def test_gemini_completion_system_prompt(genai, mock_client): ) async def test_gemini_completion_system_prompt_async(genai, mock_client_async): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_SYSTEM)) - llm = genai.GenerativeModel('gemini-1.5-flash', - system_instruction="You are a die-hard Michael Jordan 
fan that always brings stats to the discussion.") + llm = genai.GenerativeModel( + "gemini-1.5-flash", + system_instruction="You are a die-hard Michael Jordan fan that always brings stats to the discussion.", + ) await llm.generate_content_async( "What is the argument for LeBron James being the GOAT?", generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), @@ -202,8 +207,10 @@ async def test_gemini_completion_system_prompt_async(genai, mock_client_async): @pytest.mark.snapshot def test_gemini_completion_stream(genai, mock_client): - mock_client.responses["stream_generate_content"] = [(_mock_completion_stream_chunk(chunk) for chunk in MOCK_COMPLETION_STREAM_CHUNKS)] - llm = genai.GenerativeModel('gemini-1.5-flash') + mock_client.responses["stream_generate_content"] = [ + (_mock_completion_stream_chunk(chunk) for chunk in MOCK_COMPLETION_STREAM_CHUNKS) + ] + llm = genai.GenerativeModel("gemini-1.5-flash") response = llm.generate_content( "Can you recite the alphabet?", generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), @@ -219,7 +226,7 @@ def test_gemini_completion_stream(genai, mock_client): ) async def test_gemini_completion_stream_async(genai, mock_client_async): mock_client_async.responses["stream_generate_content"] = [_async_streamed_response(MOCK_COMPLETION_STREAM_CHUNKS)] - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") response = await llm.generate_content_async( "Can you recite the alphabet?", generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), @@ -232,7 +239,7 @@ async def test_gemini_completion_stream_async(genai, mock_client_async): @pytest.mark.snapshot def test_gemini_tool_completion(genai, mock_client): mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_TOOL_CALL)) - llm = genai.GenerativeModel('gemini-1.5-flash', tools=[set_light_values]) + llm = genai.GenerativeModel("gemini-1.5-flash", tools=[set_light_values]) llm.generate_content( "Dim the lights so the room feels cozy and warm.", generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), @@ -245,7 +252,7 @@ def test_gemini_tool_completion(genai, mock_client): ) async def test_gemini_tool_completion_async(genai, mock_client_async): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_TOOL_CALL)) - llm = genai.GenerativeModel('gemini-1.5-flash', tools=[set_light_values]) + llm = genai.GenerativeModel("gemini-1.5-flash", tools=[set_light_values]) await llm.generate_content_async( "Dim the lights so the room feels cozy and warm.", generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), @@ -260,7 +267,11 @@ def test_gemini_tool_chat_completion(genai, mock_client): chat = model.start_chat() chat.send_message("Dim the lights so the room feels cozy and warm.") response_parts = [ - genai.protos.Part(function_response=genai.protos.FunctionResponse(name="set_light_values", response={"result": {"brightness": 50, "color_temperature": "warm"}})) + genai.protos.Part( + function_response=genai.protos.FunctionResponse( + name="set_light_values", response={"result": {"brightness": 50, "color_temperature": "warm"}} + ) + ) ] chat.send_message(response_parts) @@ -271,24 +282,32 @@ def test_gemini_tool_chat_completion(genai, mock_client): ) 
async def test_gemini_tool_chat_completion_async(genai, mock_client_async): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_TOOL_CALL)) - mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_CHAT_COMPLETION_TOOL_RESPONSE)) + mock_client_async.responses["generate_content"].append( + _mock_completion_response(MOCK_CHAT_COMPLETION_TOOL_RESPONSE) + ) model = genai.GenerativeModel(model_name="gemini-1.5-flash", tools=[set_light_values]) chat = model.start_chat() await chat.send_message_async("Dim the lights so the room feels cozy and warm.") response_parts = [ - genai.protos.Part(function_response=genai.protos.FunctionResponse(name="set_light_values", response={"result": {"brightness": 50, "color_temperature": "warm"}})) + genai.protos.Part( + function_response=genai.protos.FunctionResponse( + name="set_light_values", response={"result": {"brightness": 50, "color_temperature": "warm"}} + ) + ) ] await chat.send_message_async(response_parts) @pytest.mark.snapshot def test_gemini_completion_tool_stream(genai, mock_client): - mock_client.responses["stream_generate_content"] = [(_mock_completion_stream_chunk(chunk) for chunk in MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS)] - llm = genai.GenerativeModel('gemini-1.5-flash', tools=[set_light_values]) + mock_client.responses["stream_generate_content"] = [ + (_mock_completion_stream_chunk(chunk) for chunk in MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS) + ] + llm = genai.GenerativeModel("gemini-1.5-flash", tools=[set_light_values]) response = llm.generate_content( "Dim the lights so the room feels cozy and warm.", generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), - stream=True + stream=True, ) for _ in response: pass @@ -299,12 +318,14 @@ def test_gemini_completion_tool_stream(genai, mock_client): ignores=["resource"], ) async def test_gemini_completion_tool_stream_async(genai, mock_client_async): - mock_client_async.responses["stream_generate_content"] = [_async_streamed_response(MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS)] - llm = genai.GenerativeModel('gemini-1.5-flash', tools=[set_light_values]) + mock_client_async.responses["stream_generate_content"] = [ + _async_streamed_response(MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS) + ] + llm = genai.GenerativeModel("gemini-1.5-flash", tools=[set_light_values]) response = await llm.generate_content_async( "Dim the lights so the room feels cozy and warm.", generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), - stream=True + stream=True, ) async for _ in response: pass @@ -315,7 +336,7 @@ def test_gemini_completion_image(genai, mock_client): """Ensure passing images to generate_content() won't break patching.""" img = Image.open(os.path.join(os.path.dirname(__file__), "test_data/apple.jpg")) mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") llm.generate_content( [img, "Return a bounding box for the piranha. 
\n [ymin, xmin, ymax, xmax"], generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), @@ -324,13 +345,13 @@ def test_gemini_completion_image(genai, mock_client): @pytest.mark.snapshot( token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image", - ignores=["resource", "meta.genai.request.contents.0.text"] + ignores=["resource", "meta.genai.request.contents.0.text"], ) async def test_gemini_completion_image_async(genai, mock_client_async): """Ensure passing images to generate_content() won't break patching.""" img = Image.open(os.path.join(os.path.dirname(__file__), "test_data/apple.jpg")) mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) - llm = genai.GenerativeModel('gemini-1.5-flash') + llm = genai.GenerativeModel("gemini-1.5-flash") await llm.generate_content_async( [img, "Return a bounding box for the piranha. \n [ymin, xmin, ymax, xmax"], generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), diff --git a/tests/contrib/google_generativeai/utils.py b/tests/contrib/google_generativeai/utils.py index ecb75988edc..c2319d50327 100644 --- a/tests/contrib/google_generativeai/utils.py +++ b/tests/contrib/google_generativeai/utils.py @@ -1,37 +1,78 @@ import collections -import mock from google.generativeai import protos +import mock MOCK_COMPLETION_SIMPLE_1 = { - "candidates": [{"content": {"parts": [{"text": "The argument for LeBron James being the 'Greatest of All Time' (" - "GOAT) is multifaceted and involves a variety of factors. Here's a " - "breakdown"}], "role": "model"}, "finish_reason": 2}], - "usage_metadata": {"prompt_token_count": 12, "candidates_token_count": 30, "total_token_count": 42} + "candidates": [ + { + "content": { + "parts": [ + { + "text": "The argument for LeBron James being the 'Greatest of All Time' (" + "GOAT) is multifaceted and involves a variety of factors. Here's a " + "breakdown" + } + ], + "role": "model", + }, + "finish_reason": 2, + } + ], + "usage_metadata": {"prompt_token_count": 12, "candidates_token_count": 30, "total_token_count": 42}, } MOCK_COMPLETION_SIMPLE_2 = { - "candidates": [{"content": {"parts": [{"text": "The sky appears blue due to a phenomenon called **Rayleigh " - "scattering**. \nHere's how it works:* **Sunlight is made up of " - "all colors of the"}], - "role": "model"}, "finish_reason": 2}], - "usage_metadata": {"prompt_token_count": 24, "candidates_token_count": 35, "total_token_count": 59} + "candidates": [ + { + "content": { + "parts": [ + { + "text": "The sky appears blue due to a phenomenon called **Rayleigh " + "scattering**. \nHere's how it works:* **Sunlight is made up of " + "all colors of the" + } + ], + "role": "model", + }, + "finish_reason": 2, + } + ], + "usage_metadata": {"prompt_token_count": 24, "candidates_token_count": 35, "total_token_count": 59}, } MOCK_COMPLETION_SIMPLE_SYSTEM = { - "candidates": [{"content": {"parts": [{"text": "Look, I respect LeBron James. He's a phenomenal player, " - "an incredible athlete, and a great ambassador for the game. But " - "when it comes to the GOAT, the crown belongs to His Airness, " - "Michael Jordan!"}], "role": "model"}, "finish_reason": 2}], + "candidates": [ + { + "content": { + "parts": [ + { + "text": "Look, I respect LeBron James. He's a phenomenal player, " + "an incredible athlete, and a great ambassador for the game. 
But " + "when it comes to the GOAT, the crown belongs to His Airness, " + "Michael Jordan!" + } + ], + "role": "model", + }, + "finish_reason": 2, + } + ], "usage_metadata": {"prompt_token_count": 29, "candidates_token_count": 45, "total_token_count": 74}, } MOCK_COMPLETION_STREAM_CHUNKS = ( {"text": "A", "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 1, "total_token_count": 7}}, - {"text": ", B, C, D, E, F, G, H, I", - "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 17, "total_token_count": 23}}, - {"text": ", J, K, L, M, N, O, P, Q", - "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 33, "total_token_count": 39}}, - {"text": ", R, S, T, U, V, W, X, Y, Z.\n", - "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 52, "total_token_count": 58}}, + { + "text": ", B, C, D, E, F, G, H, I", + "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 17, "total_token_count": 23}, + }, + { + "text": ", J, K, L, M, N, O, P, Q", + "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 33, "total_token_count": 39}, + }, + { + "text": ", R, S, T, U, V, W, X, Y, Z.\n", + "usage_metadata": {"prompt_token_count": 6, "candidates_token_count": 52, "total_token_count": 58}, + }, ) MOCK_COMPLETION_TOOL_CALL = { "candidates": [ @@ -43,39 +84,43 @@ "name": "set_light_values", "args": { "fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}] - } + }, } } ], - "role": "model" + "role": "model", }, "finish_reason": 2, } ], - "usage_metadata": {"prompt_token_count": 150, "candidates_token_count": 25, "total_token_count": 175} + "usage_metadata": {"prompt_token_count": 150, "candidates_token_count": 25, "total_token_count": 175}, } MOCK_CHAT_COMPLETION_TOOL_RESPONSE = { "candidates": [ { "content": { - "parts": [{"text": "OK. I've dimmed the lights to 50% and set the color temperature to warm. How's that? \n"}], + "parts": [ + {"text": "OK. I've dimmed the lights to 50% and set the color temperature to warm. How's that? \n"} + ], "role": "model", }, "finish_reason": 2, }, ], - "usage_metadata": {"prompt_token_count": 206, "candidates_token_count": 27, "total_token_count": 233} + "usage_metadata": {"prompt_token_count": 206, "candidates_token_count": 27, "total_token_count": 233}, } MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS = ( { - "function_call": {"name": "set_light_values", "args": {"fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}}, + "function_call": { + "name": "set_light_values", + "args": {"fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}, + }, "usage_metadata": {"prompt_token_count": 150, "candidates_token_count": 25, "total_token_count": 175}, - - }, + }, ) MOCK_COMPLETION_IMG_CALL = { "candidates": [{"content": {"parts": [{"text": "57 100 900 911"}], "role": "model"}, "finish_reason": 2}], - "usage_metadata": {"prompt_token_count": 277, "candidates_token_count": 14, "total_token_count": 291} + "usage_metadata": {"prompt_token_count": 277, "candidates_token_count": 14, "total_token_count": 291}, } @@ -115,10 +160,7 @@ def set_light_values(brightness, color_temp): Returns: A dictionary containing the set brightness and color temperature. 
""" - return { - "brightness": brightness, - "colorTemperature": color_temp - } + return {"brightness": brightness, "colorTemperature": color_temp} async def _async_streamed_response(mock_chunks): @@ -131,7 +173,9 @@ def _mock_completion_response(mock_completion_dict): mock_content = protos.Content(mock_completion_dict["candidates"][0]["content"]) return protos.GenerateContentResponse( { - "candidates": [{"content": mock_content, "finish_reason": mock_completion_dict["candidates"][0]["finish_reason"]}], + "candidates": [ + {"content": mock_content, "finish_reason": mock_completion_dict["candidates"][0]["finish_reason"]} + ], "usage_metadata": mock_completion_dict["usage_metadata"], } ) From 4344a6ef91b5f6b2ecff77479d023134479a3e0a Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Mon, 9 Sep 2024 17:06:14 -0400 Subject: [PATCH 08/32] Docs --- .../contrib/google_generativeai/__init__.py | 40 +++++++++---------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/ddtrace/contrib/google_generativeai/__init__.py b/ddtrace/contrib/google_generativeai/__init__.py index 5d55fa33743..e893d1b3f9b 100644 --- a/ddtrace/contrib/google_generativeai/__init__.py +++ b/ddtrace/contrib/google_generativeai/__init__.py @@ -1,42 +1,39 @@ """ -The Anthropic integration instruments the Anthropic Python library to traces for requests made to the models for messages. +The Gemini integration instruments the Google Gemini Python API to traces for requests made to Google models. -All traces submitted from the Anthropic integration are tagged by: +All traces submitted from the Gemini integration are tagged by: - ``service``, ``env``, ``version``: see the `Unified Service Tagging docs `_. -- ``anthropic.request.model``: Anthropic model used in the request. -- ``anthropic.request.api_key``: Anthropic API key used to make the request (obfuscated to match the Anthropic UI representation ``sk-...XXXX`` where ``XXXX`` is the last 4 digits of the key). -- ``anthropic.request.parameters``: Parameters used in anthropic package call. +- ``genai.request.model``: Google model used in the request. +- ``genai.request.api_key``: Google Gemini API key used to make the request (obfuscated to match the Google AI Studio UI representation ``...XXXX`` where ``XXXX`` is the last 4 digits of the key). (beta) Prompt and Completion Sampling ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Prompt texts and completion content for the ``Messages.create`` endpoint are collected in span tags with a default sampling rate of ``1.0``. +Prompt texts and completion content for the ``generateContent`` endpoint are collected in span tags with a default sampling rate of ``1.0``. These tags will have truncation applied if the text exceeds the configured character limit. Enabling ~~~~~~~~ -The Anthropic integration is enabled automatically when you use +The Gemini integration is enabled automatically when you use :ref:`ddtrace-run` or :ref:`import ddtrace.auto`. -Note that these commands also enable the ``httpx`` integration which traces HTTP requests from the Anthropic library. - -Alternatively, use :func:`patch() ` to manually enable the Anthropic integration:: +Alternatively, use :func:`patch() ` to manually enable the Gemini integration:: from ddtrace import config, patch - patch(anthropic=True) + patch(google_generativeai=True) Global Configuration ~~~~~~~~~~~~~~~~~~~~ -.. py:data:: ddtrace.config.anthropic["service"] +.. py:data:: ddtrace.config.google_generativeai["service"] - The service name reported by default for Anthropic requests. 
+ The service name reported by default for Gemini requests. Alternatively, you can set this option with the ``DD_SERVICE`` or ``DD_ANTHROPIC_SERVICE`` environment variables. @@ -44,26 +41,26 @@ Default: ``DD_SERVICE`` -.. py:data:: (beta) ddtrace.config.anthropic["span_char_limit"] +.. py:data:: (beta) ddtrace.config.google_generativeai["span_char_limit"] Configure the maximum number of characters for the following data within span tags: - - Message inputs and completions + - Text inputs and completions Text exceeding the maximum number of characters is truncated to the character limit and has ``...`` appended to the end. - Alternatively, you can set this option with the ``DD_ANTHROPIC_SPAN_CHAR_LIMIT`` environment + Alternatively, you can set this option with the ``DD_GOOGLE_GENERATIVEAI_SPAN_CHAR_LIMIT`` environment variable. Default: ``128`` -.. py:data:: (beta) ddtrace.config.anthropic["span_prompt_completion_sample_rate"] +.. py:data:: (beta) ddtrace.config.google_generativeai["span_prompt_completion_sample_rate"] Configure the sample rate for the collection of prompts and completions as span tags. - Alternatively, you can set this option with the ``DD_ANTHROPIC_SPAN_PROMPT_COMPLETION_SAMPLE_RATE`` environment + Alternatively, you can set this option with the ``DD_GOOGLE_GENERATIVEAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE`` environment variable. Default: ``1.0`` @@ -72,14 +69,15 @@ Instance Configuration ~~~~~~~~~~~~~~~~~~~~~~ -To configure the Anthropic integration on a per-instance basis use the +To configure the Gemini integration on a per-instance basis use the ``Pin`` API:: - import anthropic + import google.generativeai as genai from ddtrace import Pin, config - Pin.override(anthropic, service="my-anthropic-service") + Pin.override(genai, service="my-gemini-service") """ # noqa: E501 + from ...internal.utils.importlib import require_modules From 8531633b63de2465c9d919f1b2c054b75d61b813 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Mon, 9 Sep 2024 17:10:13 -0400 Subject: [PATCH 09/32] More docs --- docs/index.rst | 2 ++ docs/integrations.rst | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index 123a7a42b90..2435da88217 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -92,6 +92,8 @@ contacting support. +--------------------------------------------------+---------------+----------------+ | :ref:`gevent` (greenlet>=1.0) | >= 20.12 | Yes | +--------------------------------------------------+---------------+----------------+ +| :ref:`google_generativeai` | >= 0.7.2 | Yes | ++--------------------------------------------------+---------------+----------------+ | :ref:`grpc` | >= 1.34 | Yes [5]_ | +--------------------------------------------------+---------------+----------------+ | :ref:`graphene ` | >= 3.0.0 | Yes | diff --git a/docs/integrations.rst b/docs/integrations.rst index 3eca5427cac..614af259e17 100644 --- a/docs/integrations.rst +++ b/docs/integrations.rst @@ -189,6 +189,13 @@ gevent .. automodule:: ddtrace.contrib.gevent +.. _google_generativeai: + +google.generativeai +^^^^^^^^^^^^^^^^^^^ +.. automodule:: ddtrace.contrib.google_generativeai + + .. 
_graphql: graphql From d449f011cf9c2591685d2834b159ce8d8231ab89 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Mon, 9 Sep 2024 17:28:18 -0400 Subject: [PATCH 10/32] fmt --- ddtrace/contrib/google_generativeai/__init__.py | 2 +- ddtrace/contrib/internal/google_generativeai/_utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ddtrace/contrib/google_generativeai/__init__.py b/ddtrace/contrib/google_generativeai/__init__.py index e893d1b3f9b..32fa860ea8c 100644 --- a/ddtrace/contrib/google_generativeai/__init__.py +++ b/ddtrace/contrib/google_generativeai/__init__.py @@ -85,8 +85,8 @@ with require_modules(required_modules) as missing_modules: if not missing_modules: + from ..internal.google_generativeai.patch import get_version from ..internal.google_generativeai.patch import patch from ..internal.google_generativeai.patch import unpatch - from ..internal.google_generativeai.patch import get_version __all__ = ["patch", "unpatch", "get_version"] diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py index 0164984dda5..efc83aa8c16 100644 --- a/ddtrace/contrib/internal/google_generativeai/_utils.py +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -75,7 +75,7 @@ def _tag_request_content_part(span, integration, part, part_idx, content_idx): function_response = getattr(part, "function_response", None) if isinstance(part, dict): text = part.get("text", "") - function_call = part.get("function_call", None) # TODO: CHECK FOR DICT FUNCTION_CALL/FUNCTION_RESPONSE TYPE + function_call = part.get("function_call", None) function_response = part.get("function_response", None) span.set_tag_str("genai.request.contents.%d.parts.%d.text" % (content_idx, part_idx), integration.trunc(str(text))) if function_call: From 4c6003a4b378ae958ff44e6ac247481dd6c6b748 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Mon, 9 Sep 2024 17:52:47 -0400 Subject: [PATCH 11/32] Suitespec --- tests/.suitespec.json | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/.suitespec.json b/tests/.suitespec.json index 66a0bbc76de..d9ce1ba3672 100644 --- a/tests/.suitespec.json +++ b/tests/.suitespec.json @@ -283,6 +283,10 @@ "ddtrace/contrib/anthropic/*", "ddtrace/contrib/internal/anthropic/*" ], + "google_generativeai": [ + "ddtrace/contrib/google_generativeai/*", + "ddtrace/contrib/internal/google_generativeai/*" + ], "subprocess": [ "ddtrace/contrib/subprocess/*", "ddtrace/contrib/internal/subprocess/*" @@ -1500,6 +1504,16 @@ "tests/contrib/anthropic/*", "tests/snapshots/tests.contrib.anthropic.*" ], + "google_generativeai": [ + "@bootstrap", + "@core", + "@tracing", + "@contrib", + "@google_generativeai", + "@llmobs", + "tests/contrib/google_generativeai/*", + "tests/snapshots/tests.contrib.google_generativeai.*" + ], "runtime": [ "@core", "@runtime", From 902611a398b71ec0efc34faad453cb046ac507b1 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Mon, 9 Sep 2024 18:03:30 -0400 Subject: [PATCH 12/32] Remove from gitlab, add to circleci --- .circleci/config.templ.yml | 8 ++++++++ .gitlab/tests/contrib.yml | 5 ----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.circleci/config.templ.yml b/.circleci/config.templ.yml index 0a43a7e4ec4..f248a61c972 100644 --- a/.circleci/config.templ.yml +++ b/.circleci/config.templ.yml @@ -1020,6 +1020,14 @@ jobs: pattern: "anthropic" snapshot: true + google_generativeai: + <<: *machine_executor + parallelism: 3 + steps: + - run_test: + pattern: "google_generativeai" + 
snapshot: true + logbook: <<: *machine_executor steps: diff --git a/.gitlab/tests/contrib.yml b/.gitlab/tests/contrib.yml index 570d011f5bf..34c314a901f 100644 --- a/.gitlab/tests/contrib.yml +++ b/.gitlab/tests/contrib.yml @@ -38,11 +38,6 @@ gevent: variables: SUITE_NAME: "gevent" -google_genereativeai: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "google_genereativeai" - graphene: extends: .test_base_riot_snapshot variables: From 648a3ae5343989664810a426da1cbc6e235b0e50 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Mon, 9 Sep 2024 18:07:43 -0400 Subject: [PATCH 13/32] spellcheck --- docs/integrations.rst | 2 +- docs/spelling_wordlist.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/integrations.rst b/docs/integrations.rst index 614af259e17..d07fbe33e45 100644 --- a/docs/integrations.rst +++ b/docs/integrations.rst @@ -191,7 +191,7 @@ gevent .. _google_generativeai: -google.generativeai +google-generativeai ^^^^^^^^^^^^^^^^^^^ .. automodule:: ddtrace.contrib.google_generativeai diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index 3f66f358626..b27c2752aab 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -96,6 +96,7 @@ exec fastapi formatter gRPC +generativeai gevent graphql graphene From 4b79cbfdfc784ddb55c796421ca9bd9eafa93b72 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 10:19:19 -0400 Subject: [PATCH 14/32] Migrate tests back to gitlab --- .circleci/config.templ.yml | 8 -------- .gitlab/tests/contrib.yml | 5 +++++ .../google_generativeai/test_google_generativeai.py | 2 +- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/.circleci/config.templ.yml b/.circleci/config.templ.yml index f248a61c972..0a43a7e4ec4 100644 --- a/.circleci/config.templ.yml +++ b/.circleci/config.templ.yml @@ -1020,14 +1020,6 @@ jobs: pattern: "anthropic" snapshot: true - google_generativeai: - <<: *machine_executor - parallelism: 3 - steps: - - run_test: - pattern: "google_generativeai" - snapshot: true - logbook: <<: *machine_executor steps: diff --git a/.gitlab/tests/contrib.yml b/.gitlab/tests/contrib.yml index 34c314a901f..570d011f5bf 100644 --- a/.gitlab/tests/contrib.yml +++ b/.gitlab/tests/contrib.yml @@ -38,6 +38,11 @@ gevent: variables: SUITE_NAME: "gevent" +google_genereativeai: + extends: .test_base_riot_snapshot + variables: + SUITE_NAME: "google_genereativeai" + graphene: extends: .test_base_riot_snapshot variables: diff --git a/tests/contrib/google_generativeai/test_google_generativeai.py b/tests/contrib/google_generativeai/test_google_generativeai.py index efc1ee96f8d..10baf181ca6 100644 --- a/tests/contrib/google_generativeai/test_google_generativeai.py +++ b/tests/contrib/google_generativeai/test_google_generativeai.py @@ -67,7 +67,7 @@ async def test_gemini_completion_async(genai, mock_client_async): ) -@pytest.mark.snapshot +@pytest.mark.snapshot(ignores=["meta.error.stack"]) def test_gemini_completion_error(genai, mock_client): llm = genai.GenerativeModel("gemini-1.5-flash") llm._client = mock.Mock() From e622d35df37dd9036a2cfebdc607b3c7f014d317 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 11:33:03 -0400 Subject: [PATCH 15/32] fix spelling --- .gitlab/tests/contrib.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab/tests/contrib.yml b/.gitlab/tests/contrib.yml index 570d011f5bf..e37133425b4 100644 --- a/.gitlab/tests/contrib.yml +++ b/.gitlab/tests/contrib.yml @@ -38,10 +38,10 @@ gevent: variables: SUITE_NAME: "gevent" 
-google_genereativeai: +google_generativeai: extends: .test_base_riot_snapshot variables: - SUITE_NAME: "google_genereativeai" + SUITE_NAME: "google_generativeai" graphene: extends: .test_base_riot_snapshot From 179154554e813f9dc8c8f33d41f1891fc1dc8345 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 11:40:54 -0400 Subject: [PATCH 16/32] Codeowners --- .github/CODEOWNERS | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7a30206cc90..44766dfc09d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -124,6 +124,8 @@ ddtrace/contrib/internal/botocore/services/bedrock.py @DataDog/ml-observabilit ddtrace/contrib/botocore/services/bedrock.py @DataDog/ml-observability ddtrace/contrib/internal/anthropic @DataDog/ml-observability ddtrace/contrib/anthropic @DataDog/ml-observability +ddtrace/contrib/internal/google_generativeai @DataDog/ml-observability +ddtrace/contrib/google_generativeai @DataDog/ml-observability tests/llmobs @DataDog/ml-observability tests/contrib/openai @DataDog/ml-observability tests/contrib/langchain @DataDog/ml-observability @@ -131,6 +133,8 @@ tests/contrib/botocore/test_bedrock.py @DataDog/ml-observabilit tests/contrib/botocore/test_bedrock_llmobs.py @DataDog/ml-observability tests/contrib/botocore/bedrock_cassettes @DataDog/ml-observability tests/contrib/anthropic @DataDog/ml-observability +tests/contrib/google_generativeai @DataDog/ml-observability + # Remote Config ddtrace/internal/remoteconfig @DataDog/remote-config @DataDog/apm-core-python From a6ab1ddc3b5824184afa509aca162b807d225973 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 11:42:35 -0400 Subject: [PATCH 17/32] Move suite to llmobs gitlab --- .gitlab/tests/contrib.yml | 5 ----- .gitlab/tests/llmobs.yml | 4 ++++ 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.gitlab/tests/contrib.yml b/.gitlab/tests/contrib.yml index 3e082f8024c..98d83e4d39a 100644 --- a/.gitlab/tests/contrib.yml +++ b/.gitlab/tests/contrib.yml @@ -132,11 +132,6 @@ gevent: variables: SUITE_NAME: "gevent" -google_generativeai: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "google_generativeai" - graphene: extends: .test_base_riot_snapshot variables: diff --git a/.gitlab/tests/llmobs.yml b/.gitlab/tests/llmobs.yml index 32dca6c6d05..7abae87537c 100644 --- a/.gitlab/tests/llmobs.yml +++ b/.gitlab/tests/llmobs.yml @@ -22,3 +22,7 @@ anthropic: variables: SUITE_NAME: "anthropic" +google_generativeai: + extends: .test_base_riot_snapshot + variables: + SUITE_NAME: "google_generativeai" From f9c4cff1bffa73a24e8a9db89bcdccb8a5b3e3c8 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 15:42:47 -0400 Subject: [PATCH 18/32] Wip llmobs integration for gemini --- .../internal/google_generativeai/_utils.py | 6 +- .../internal/google_generativeai/patch.py | 9 ++ ddtrace/llmobs/_constants.py | 1 + ddtrace/llmobs/_integrations/gemini.py | 150 +++++++++++++++++- ddtrace/llmobs/_utils.py | 3 +- .../feat-llmobs-gemini-b65c714ceef9eb12.yaml | 4 + 6 files changed, 169 insertions(+), 4 deletions(-) create mode 100644 releasenotes/notes/feat-llmobs-gemini-b65c714ceef9eb12.yaml diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py index efc83aa8c16..f3f0524a074 100644 --- a/ddtrace/contrib/internal/google_generativeai/_utils.py +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -28,6 +28,8 @@ def __iter__(self): else: tag_response(self._dd_span, self.__wrapped__, 
self._dd_integration, self._model_instance) finally: + if self._dd_integration.is_pc_sampled_llmobs(self._dd_span): + self._dd_integration.llmobs_set_tags(self._dd_span, self._args, self._kwargs, self._model_instance, self.__wrapped__) self._dd_span.finish() @@ -42,6 +44,8 @@ async def __aiter__(self): else: tag_response(self._dd_span, self.__wrapped__, self._dd_integration, self._model_instance) finally: + if self._dd_integration.is_pc_sampled_llmobs(self._dd_span): + self._dd_integration.llmobs_set_tags(self._dd_span, self._args, self._kwargs, self._model_instance, self.__wrapped__) self._dd_span.finish() @@ -168,8 +172,6 @@ def tag_request(span, integration, instance, args, kwargs): if stream: span.set_tag("genai.request.stream", True) - span.set_tag_str("genai.request.model", str(_extract_model_name(instance))) - if not integration.is_pc_sampled_span(span): return diff --git a/ddtrace/contrib/internal/google_generativeai/patch.py b/ddtrace/contrib/internal/google_generativeai/patch.py index 25b9fa945fb..07a00518580 100644 --- a/ddtrace/contrib/internal/google_generativeai/patch.py +++ b/ddtrace/contrib/internal/google_generativeai/patch.py @@ -7,6 +7,7 @@ from ddtrace.contrib.internal.google_generativeai._utils import TracedAsyncGenerateContentResponse from ddtrace.contrib.internal.google_generativeai._utils import TracedGenerateContentResponse from ddtrace.contrib.internal.google_generativeai._utils import _extract_api_key +from ddtrace.contrib.internal.google_generativeai._utils import _extract_model_name from ddtrace.contrib.internal.google_generativeai._utils import tag_request from ddtrace.contrib.internal.google_generativeai._utils import tag_response from ddtrace.contrib.trace_utils import unwrap @@ -41,6 +42,8 @@ def traced_generate(genai, pin, func, instance, args, kwargs): pin, "%s.%s" % (instance.__class__.__name__, func.__name__), provider="google", + model=_extract_model_name(instance), + submit_to_llmobs=True, ) try: tag_request(span, integration, instance, args, kwargs) @@ -57,6 +60,8 @@ def traced_generate(genai, pin, func, instance, args, kwargs): finally: # streamed spans will be finished separately once the stream generator is exhausted if span.error or not stream: + if integration.is_pc_sampled_llmobs(span): + integration.llmobs_set_tags(span, args, kwargs, instance, generations) span.finish() return generations @@ -70,6 +75,8 @@ async def traced_agenerate(genai, pin, func, instance, args, kwargs): pin, "%s.%s" % (instance.__class__.__name__, func.__name__), provider="google", + model=_extract_model_name(instance), + submit_to_llmobs=True, ) try: tag_request(span, integration, instance, args, kwargs) @@ -83,6 +90,8 @@ async def traced_agenerate(genai, pin, func, instance, args, kwargs): finally: # streamed spans will be finished separately once the stream generator is exhausted if span.error or not stream: + if integration.is_pc_sampled_llmobs(span): + integration.llmobs_set_tags(span, args, kwargs, instance, generations) span.finish() return generations diff --git a/ddtrace/llmobs/_constants.py b/ddtrace/llmobs/_constants.py index 396747bf4e6..6c6c2ae8225 100644 --- a/ddtrace/llmobs/_constants.py +++ b/ddtrace/llmobs/_constants.py @@ -23,6 +23,7 @@ "Span started while LLMObs is disabled." " Spans will not be sent to LLM Observability." 
) +GEMINI_APM_SPAN_NAME = "gemini.request" LANGCHAIN_APM_SPAN_NAME = "langchain.request" OPENAI_APM_SPAN_NAME = "openai.request" diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index 258c9e16fdb..524218060e3 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -1,5 +1,153 @@ -from .base import BaseLLMIntegration +import json +from typing import Any +from typing import Dict +from typing import Optional + +from ddtrace import Span +from ddtrace.llmobs._constants import INPUT_MESSAGES +from ddtrace.llmobs._constants import METADATA +from ddtrace.llmobs._constants import METRICS +from ddtrace.llmobs._constants import MODEL_NAME +from ddtrace.llmobs._constants import MODEL_PROVIDER +from ddtrace.llmobs._constants import OUTPUT_MESSAGES +from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY +from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY +from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY +from ddtrace.llmobs._constants import SPAN_KIND +from ddtrace.llmobs._utils import _unserializable_default_repr +from ddtrace.llmobs._integrations.base import BaseLLMIntegration +from ddtrace.internal.utils import get_argument_value class GeminiIntegration(BaseLLMIntegration): _integration_name = "gemini" + + def _set_base_span_tags( + self, span: Span, provider: Optional[str] = None, model: Optional[str] = None, **kwargs: Dict[str, Any] + ) -> None: + if provider: + span.set_tag_str("genai.request.model", model) + if model: + span.set_tag_str("genai.request.provider", provider) + + def llmobs_set_tags(self, span: Span, args: Any, kwargs: Any, instance: Any, generations: Any = None) -> None: + if not self.llmobs_enabled: + return + + span.set_tag_str(SPAN_KIND, "llm") + span.set_tag_str(MODEL_NAME, span.get_tag("genai.request.model") or "") + span.set_tag_str(MODEL_PROVIDER, span.get_tag("genai.request.provider") or "") + + metadata = self._llmobs_set_metadata(kwargs, instance) + span.set_tag_str(METADATA, json.dumps(metadata, default=_unserializable_default_repr)) + + system_instruction = _get_attr(instance, "_system_instruction", None) + input_contents = get_argument_value(args, kwargs, 0, "contents") + input_messages = self._extract_input_message(input_contents, system_instruction) + span.set_tag_str(INPUT_MESSAGES, json.dumps(input_messages, default=_unserializable_default_repr)) + + if span.error or generations is None: + span.set_tag_str(OUTPUT_MESSAGES, json.dumps([{"content": ""}])) + else: + output_messages = self._extract_output_message(generations) + span.set_tag_str(OUTPUT_MESSAGES, json.dumps(output_messages, default=_unserializable_default_repr)) + + usage = self._get_llmobs_metrics_tags(span) + if usage: + span.set_tag_str(METRICS, json.dumps(usage, default=_unserializable_default_repr)) + + @staticmethod + def _llmobs_set_metadata(kwargs, instance): + metadata = {} + breakpoint() + model_config = instance._generation_config or {} + request_config = kwargs.get("generation_config", {}) + parameters = ("temperature", "max_output_tokens", "candidate_count", "top_p", "top_k") + for param in parameters: + model_config_value = _get_attr(model_config, param, None) + request_config_value = _get_attr(request_config, param, None) + if model_config_value or request_config_value: + metadata[param] = request_config_value or model_config_value + return metadata + + @staticmethod + def _extract_input_message(contents, system_instruction=None): + messages = [] + if system_instruction: + 
for idx, part in enumerate(system_instruction.parts): + messages.append({"content": part.text or "", "role": "system"}) + if isinstance(contents, str): + messages.append({"content": contents, "role": "user"}) + return messages + elif isinstance(contents, dict): + messages.append({"content": contents.get("text", ""), "role": contents.get("role", "user")}) + return messages + elif not isinstance(contents, list): + return messages + for content_idx, content in enumerate(contents): + if isinstance(content, str): + messages.append({"content": content, "role": "user"}) + continue + role = _get_attr(content, "role", "user") + parts = _get_attr(content, "parts", []) + if not parts: + messages.append({"content": "[Non-text content object: {}]".format(repr(content)), "role": role}) + for part in parts: + text = _get_attr(part, "text", "") + function_call = _get_attr(part, "function_call", None) + function_response = _get_attr(part, "function_response", None) + message = {"content": text, "role": role} + if function_call: + function_call_dict = type(function_call).to_dict(function_call) + message["tool_calls"] = [ + {"name": function_call_dict.get("name", ""), "arguments": function_call_dict.get("args", {})} + ] + if function_response: + function_response_dict = type(function_response).to_dict(function_response) + message["content"] = "[tool result: {}]".format(function_response_dict.get("response", "")) + messages.append(message) + return messages + + @staticmethod + def _extract_output_message(generations): + output_messages = [] + generations_dict = generations.to_dict() + for idx, candidate in enumerate(generations_dict.get("candidates", [])): + content = candidate.get("content", {}) + role = content.get("role", "model") + parts = content.get("parts", []) + for part_idx, part in enumerate(parts): + text = part.get("text", "") + function_call = part.get("function_call", None) + if not function_call: + output_messages.append({"content": text, "role": role}) + continue + function_name = function_call.get("name", "") + function_args = function_call.get("args", {}) + output_messages.append( + {"content": text, "role": role, "tool_calls": [{"name": function_name, "arguments": function_args}]} + ) + return output_messages + + @staticmethod + def _get_llmobs_metrics_tags(span): + usage = {} + input_tokens = span.get_metric("genai.response.usage.prompt_tokens") + output_tokens = span.get_metric("genai.response.usage.completion_tokens") + total_tokens = span.get_metric("genai.response.usage.total_tokens") + + if input_tokens is not None: + usage[INPUT_TOKENS_METRIC_KEY] = input_tokens + if output_tokens is not None: + usage[OUTPUT_TOKENS_METRIC_KEY] = output_tokens + if total_tokens is not None: + usage[TOTAL_TOKENS_METRIC_KEY] = total_tokens + return usage + + +def _get_attr(o: Any, attr: str, default: Any): + # Convenience method to get an attribute from an object or dict + if isinstance(o, dict): + return o.get(attr, default) + else: + return getattr(o, attr, default) diff --git a/ddtrace/llmobs/_utils.py b/ddtrace/llmobs/_utils.py index 7e7ff192b67..03d07ce6fb0 100644 --- a/ddtrace/llmobs/_utils.py +++ b/ddtrace/llmobs/_utils.py @@ -5,6 +5,7 @@ from ddtrace import config from ddtrace.ext import SpanTypes from ddtrace.internal.logger import get_logger +from ddtrace.llmobs._constants import GEMINI_APM_SPAN_NAME from ddtrace.llmobs._constants import LANGCHAIN_APM_SPAN_NAME from ddtrace.llmobs._constants import ML_APP from ddtrace.llmobs._constants import OPENAI_APM_SPAN_NAME @@ -39,7 +40,7 @@ def 
_get_llmobs_parent_id(span: Span) -> Optional[str]: def _get_span_name(span: Span) -> str: - if span.name == LANGCHAIN_APM_SPAN_NAME and span.resource != "": + if span.name in (LANGCHAIN_APM_SPAN_NAME, GEMINI_APM_SPAN_NAME) and span.resource != "": return span.resource elif span.name == OPENAI_APM_SPAN_NAME and span.resource != "": return "openai.{}".format(span.resource) diff --git a/releasenotes/notes/feat-llmobs-gemini-b65c714ceef9eb12.yaml b/releasenotes/notes/feat-llmobs-gemini-b65c714ceef9eb12.yaml new file mode 100644 index 00000000000..80ce9a87b93 --- /dev/null +++ b/releasenotes/notes/feat-llmobs-gemini-b65c714ceef9eb12.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + LLM Observability: Adds support to automatically submit Gemini Python SDK calls to LLM Observability. From 1ddb560808867d5602f3be3bf823ff8acc423504 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 16:09:28 -0400 Subject: [PATCH 19/32] fmt, remove breakpoint --- .../contrib/internal/google_generativeai/_utils.py | 8 ++++++-- ddtrace/llmobs/_integrations/gemini.py | 11 +++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py index f3f0524a074..e635e2e9107 100644 --- a/ddtrace/contrib/internal/google_generativeai/_utils.py +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -29,7 +29,9 @@ def __iter__(self): tag_response(self._dd_span, self.__wrapped__, self._dd_integration, self._model_instance) finally: if self._dd_integration.is_pc_sampled_llmobs(self._dd_span): - self._dd_integration.llmobs_set_tags(self._dd_span, self._args, self._kwargs, self._model_instance, self.__wrapped__) + self._dd_integration.llmobs_set_tags( + self._dd_span, self._args, self._kwargs, self._model_instance, self.__wrapped__ + ) self._dd_span.finish() @@ -45,7 +47,9 @@ async def __aiter__(self): tag_response(self._dd_span, self.__wrapped__, self._dd_integration, self._model_instance) finally: if self._dd_integration.is_pc_sampled_llmobs(self._dd_span): - self._dd_integration.llmobs_set_tags(self._dd_span, self._args, self._kwargs, self._model_instance, self.__wrapped__) + self._dd_integration.llmobs_set_tags( + self._dd_span, self._args, self._kwargs, self._model_instance, self.__wrapped__ + ) self._dd_span.finish() diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index 524218060e3..4925f6cd53a 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -4,26 +4,26 @@ from typing import Optional from ddtrace import Span +from ddtrace.internal.utils import get_argument_value from ddtrace.llmobs._constants import INPUT_MESSAGES +from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY from ddtrace.llmobs._constants import METADATA from ddtrace.llmobs._constants import METRICS from ddtrace.llmobs._constants import MODEL_NAME from ddtrace.llmobs._constants import MODEL_PROVIDER from ddtrace.llmobs._constants import OUTPUT_MESSAGES -from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY -from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._constants import SPAN_KIND -from ddtrace.llmobs._utils import _unserializable_default_repr +from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations.base import BaseLLMIntegration -from ddtrace.internal.utils import get_argument_value +from 
ddtrace.llmobs._utils import _unserializable_default_repr class GeminiIntegration(BaseLLMIntegration): _integration_name = "gemini" def _set_base_span_tags( - self, span: Span, provider: Optional[str] = None, model: Optional[str] = None, **kwargs: Dict[str, Any] + self, span: Span, provider: Optional[str] = None, model: Optional[str] = None, **kwargs: Dict[str, Any] ) -> None: if provider: span.set_tag_str("genai.request.model", model) @@ -59,7 +59,6 @@ def llmobs_set_tags(self, span: Span, args: Any, kwargs: Any, instance: Any, gen @staticmethod def _llmobs_set_metadata(kwargs, instance): metadata = {} - breakpoint() model_config = instance._generation_config or {} request_config = kwargs.get("generation_config", {}) parameters = ("temperature", "max_output_tokens", "candidate_count", "top_p", "top_k") From f32abd5f0f916fba632fc647ef2635b6e6699e70 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 17:24:30 -0400 Subject: [PATCH 20/32] Add tests --- tests/contrib/google_generativeai/conftest.py | 18 + .../test_google_generativeai.py | 4 +- .../test_google_generativeai_llmobs.py | 549 ++++++++++++++++++ tests/llmobs/_utils.py | 26 +- ...rativeai.test_gemini_completion_image.json | 2 +- 5 files changed, 586 insertions(+), 13 deletions(-) create mode 100644 tests/contrib/google_generativeai/test_google_generativeai_llmobs.py diff --git a/tests/contrib/google_generativeai/conftest.py b/tests/contrib/google_generativeai/conftest.py index 1a4a5c057a6..7da872255c3 100644 --- a/tests/contrib/google_generativeai/conftest.py +++ b/tests/contrib/google_generativeai/conftest.py @@ -1,9 +1,11 @@ import os +import mock import pytest from ddtrace.contrib.google_generativeai import patch from ddtrace.contrib.google_generativeai import unpatch +from ddtrace.llmobs import LLMObs from ddtrace.pin import Pin from tests.contrib.google_generativeai.utils import MockGenerativeModelAsyncClient from tests.contrib.google_generativeai.utils import MockGenerativeModelClient @@ -35,11 +37,27 @@ def mock_tracer(ddtrace_global_config, genai): mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) pin.override(genai, tracer=mock_tracer) pin.tracer.configure() + if ddtrace_global_config.get("_llmobs_enabled", False): + # Have to disable and re-enable LLMObs to use to mock tracer. + LLMObs.disable() + LLMObs.enable(_tracer=mock_tracer, integrations_enabled=False) yield mock_tracer except Exception: yield +@pytest.fixture +def mock_llmobs_writer(): + patcher = mock.patch("ddtrace.llmobs._llmobs.LLMObsSpanWriter") + try: + LLMObsSpanWriterMock = patcher.start() + m = mock.MagicMock() + LLMObsSpanWriterMock.return_value = m + yield m + finally: + patcher.stop() + + @pytest.fixture def mock_client(): yield MockGenerativeModelClient() diff --git a/tests/contrib/google_generativeai/test_google_generativeai.py b/tests/contrib/google_generativeai/test_google_generativeai.py index 10baf181ca6..79490af9ec8 100644 --- a/tests/contrib/google_generativeai/test_google_generativeai.py +++ b/tests/contrib/google_generativeai/test_google_generativeai.py @@ -338,7 +338,7 @@ def test_gemini_completion_image(genai, mock_client): mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) llm = genai.GenerativeModel("gemini-1.5-flash") llm.generate_content( - [img, "Return a bounding box for the piranha. \n [ymin, xmin, ymax, xmax"], + [img, "Return a bounding box for the apple. 
\n [ymin, xmin, ymax, xmax]"], generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), ) @@ -353,6 +353,6 @@ async def test_gemini_completion_image_async(genai, mock_client_async): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) llm = genai.GenerativeModel("gemini-1.5-flash") await llm.generate_content_async( - [img, "Return a bounding box for the piranha. \n [ymin, xmin, ymax, xmax"], + [img, "Return a bounding box for the apple. \n [ymin, xmin, ymax, xmax]"], generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), ) diff --git a/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py b/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py new file mode 100644 index 00000000000..a293215b8d7 --- /dev/null +++ b/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py @@ -0,0 +1,549 @@ +import os + +from PIL import Image +from google.api_core.exceptions import InvalidArgument +import mock +import pytest + +from tests.llmobs._utils import _expected_llmobs_llm_span_event +from tests.contrib.google_generativeai.utils import _mock_completion_response +from tests.contrib.google_generativeai.utils import _mock_completion_stream_chunk +from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_SIMPLE_1 +from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_SIMPLE_2 +from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_SIMPLE_SYSTEM +from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_STREAM_CHUNKS +from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS +from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_TOOL_CALL +from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_IMG_CALL +from tests.contrib.google_generativeai.utils import _async_streamed_response +from tests.contrib.google_generativeai.utils import set_light_values + + +@pytest.mark.parametrize( + "ddtrace_global_config", [dict(_llmobs_enabled=True, _llmobs_sample_rate=1.0, _llmobs_ml_app="")] +) +class TestLLMObsGemini: + def test_completion(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_1)) + llm = genai.GenerativeModel("gemini-1.5-flash") + llm.generate_content( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, + temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[{"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}], + output_messages=[ + {"content": MOCK_COMPLETION_SIMPLE_1["candidates"][0]["content"]["parts"][0]["text"], "role": "model"}, + ], + metadata={"temperature": 1.0, "max_output_tokens": 35}, + token_metrics={"input_tokens": 12, "output_tokens": 30, "total_tokens": 42}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + async def test_completion_async( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer + ): + 
mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_1)) + llm = genai.GenerativeModel("gemini-1.5-flash") + await llm.generate_content_async( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, + temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[{"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}], + output_messages=[ + {"content": MOCK_COMPLETION_SIMPLE_1["candidates"][0]["content"]["parts"][0]["text"], "role": "model"} + ], + metadata={"temperature": 1.0, "max_output_tokens": 35}, + token_metrics={"input_tokens": 12, "output_tokens": 30, "total_tokens": 42}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + def test_completion_error(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + llm = genai.GenerativeModel("gemini-1.5-flash") + llm._client = mock.Mock() + llm._client.generate_content.side_effect = InvalidArgument("Invalid API key. Please pass a valid API key.") + with pytest.raises(InvalidArgument): + llm.generate_content( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, + temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + mock_llmobs_writer.enqueue.assert_called_with( + _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[{"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}], + output_messages=[{"content": ""}], + error="google.api_core.exceptions.InvalidArgument", + error_message=span.get_tag("error.message"), + error_stack=span.get_tag("error.stack"), + metadata={"temperature": 1.0, "max_output_tokens": 35}, + tags={"ml_app": ""}, + integration="gemini" + ) + ) + + async def test_completion_error_async( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer + ): + llm = genai.GenerativeModel("gemini-1.5-flash") + llm._async_client = mock.Mock() + llm._async_client.generate_content.side_effect = InvalidArgument( + "Invalid API key. 
Please pass a valid API key.") + with pytest.raises(InvalidArgument): + await llm.generate_content_async( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, + temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + mock_llmobs_writer.enqueue.assert_called_with( + _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[{"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}], + output_messages=[{"content": ""}], + error="google.api_core.exceptions.InvalidArgument", + error_message=span.get_tag("error.message"), + error_stack=span.get_tag("error.stack"), + metadata={"temperature": 1.0, "max_output_tokens": 35}, + tags={"ml_app": ""}, + integration="gemini" + ) + ) + + def test_completion_multiple_messages(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) + llm = genai.GenerativeModel("gemini-1.5-flash") + llm.generate_content( + [ + {"role": "user", "parts": [{"text": "Hello world!"}]}, + {"role": "model", "parts": [{"text": "Great to meet you. What would you like to know?"}]}, + {"role": "user", "parts": [{"text": "Why is the sky blue?"}]}, + ], + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[ + {"content": "Hello world!", "role": "user"}, + {"content": "Great to meet you. What would you like to know?", "role": "model"}, + {"content": "Why is the sky blue?", "role": "user"}, + ], + output_messages=[ + {"content": MOCK_COMPLETION_SIMPLE_2["candidates"][0]["content"]["parts"][0]["text"], "role": "model"} + ], + metadata={"temperature": 1.0, "max_output_tokens": 35}, + token_metrics={"input_tokens": 24, "output_tokens": 35, "total_tokens": 59}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + async def test_completion_multiple_messages_async( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer + ): + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) + llm = genai.GenerativeModel("gemini-1.5-flash") + await llm.generate_content_async( + [ + {"role": "user", "parts": [{"text": "Hello world!"}]}, + {"role": "model", "parts": [{"text": "Great to meet you. What would you like to know?"}]}, + {"role": "user", "parts": [{"text": "Why is the sky blue?"}]}, + ], + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[ + {"content": "Hello world!", "role": "user"}, + {"content": "Great to meet you. 
What would you like to know?", "role": "model"}, + {"content": "Why is the sky blue?", "role": "user"}, + ], + output_messages=[ + {"content": MOCK_COMPLETION_SIMPLE_2["candidates"][0]["content"]["parts"][0]["text"], "role": "model"} + ], + metadata={"temperature": 1.0, "max_output_tokens": 35}, + token_metrics={"input_tokens": 24, "output_tokens": 35, "total_tokens": 59}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + def test_chat_completion(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) + llm = genai.GenerativeModel("gemini-1.5-flash") + chat = llm.start_chat( + history=[ + {"role": "user", "parts": "Hello world!"}, + {"role": "model", "parts": "Great to meet you. What would you like to know?"}, + ] + ) + chat.send_message( + "Why is the sky blue?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[ + {"content": "Hello world!", "role": "user"}, + {"content": "Great to meet you. What would you like to know?", "role": "model"}, + {"content": "Why is the sky blue?", "role": "user"}, + ], + output_messages=[ + {"content": MOCK_COMPLETION_SIMPLE_2["candidates"][0]["content"]["parts"][0]["text"], "role": "model"} + ], + metadata={"temperature": 1.0, "max_output_tokens": 35}, + token_metrics={"input_tokens": 24, "output_tokens": 35, "total_tokens": 59}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + async def test_chat_completion_async(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer): + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) + llm = genai.GenerativeModel("gemini-1.5-flash") + chat = llm.start_chat( + history=[ + {"role": "user", "parts": "Hello world!"}, + {"role": "model", "parts": "Great to meet you. What would you like to know?"}, + ] + ) + await chat.send_message_async( + "Why is the sky blue?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[ + {"content": "Hello world!", "role": "user"}, + {"content": "Great to meet you. 
What would you like to know?", "role": "model"}, + {"content": "Why is the sky blue?", "role": "user"}, + ], + output_messages=[ + {"content": MOCK_COMPLETION_SIMPLE_2["candidates"][0]["content"]["parts"][0]["text"], "role": "model"} + ], + metadata={"temperature": 1.0, "max_output_tokens": 35}, + token_metrics={"input_tokens": 24, "output_tokens": 35, "total_tokens": 59}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + def test_completion_system_prompt(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_SYSTEM)) + llm = genai.GenerativeModel( + "gemini-1.5-flash", + system_instruction="You are a die-hard Michael Jordan fan that always brings stats to the discussion.", + ) + llm.generate_content( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=50, temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[ + {"content": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", "role": "system"}, + {"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}, + ], + output_messages=[ + {"content": MOCK_COMPLETION_SIMPLE_SYSTEM["candidates"][0]["content"]["parts"][0]["text"], "role": "model"} + ], + metadata={"temperature": 1.0, "max_output_tokens": 50}, + token_metrics={"input_tokens": 29, "output_tokens": 45, "total_tokens": 74}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + async def test_completion_system_prompt_async(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer): + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_SYSTEM)) + llm = genai.GenerativeModel( + "gemini-1.5-flash", + system_instruction="You are a die-hard Michael Jordan fan that always brings stats to the discussion.", + ) + await llm.generate_content_async( + "What is the argument for LeBron James being the GOAT?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=50, temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[ + {"content": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", "role": "system"}, + {"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}, + ], + output_messages=[ + {"content": MOCK_COMPLETION_SIMPLE_SYSTEM["candidates"][0]["content"]["parts"][0]["text"], "role": "model"}, + ], + metadata={"temperature": 1.0, "max_output_tokens": 50}, + token_metrics={"input_tokens": 29, "output_tokens": 45, "total_tokens": 74}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + def test_completion_stream(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + 
mock_client.responses["stream_generate_content"] = [ + (_mock_completion_stream_chunk(chunk) for chunk in MOCK_COMPLETION_STREAM_CHUNKS) + ] + llm = genai.GenerativeModel("gemini-1.5-flash") + response = llm.generate_content( + "Can you recite the alphabet?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=60, temperature=1.0), + stream=True, + ) + for _ in response: + pass + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[{"content": "Can you recite the alphabet?", "role": "user"}], + output_messages=[ + {"content": "".join(chunk["text"] for chunk in MOCK_COMPLETION_STREAM_CHUNKS), "role": "model"} + ], + metadata={"temperature": 1.0, "max_output_tokens": 60}, + token_metrics={"input_tokens": 6, "output_tokens": 52, "total_tokens": 58}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + async def test_completion_stream_async( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer + ): + mock_client_async.responses["stream_generate_content"] = [_async_streamed_response(MOCK_COMPLETION_STREAM_CHUNKS)] + llm = genai.GenerativeModel("gemini-1.5-flash") + response = await llm.generate_content_async( + "Can you recite the alphabet?", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=60, temperature=1.0), + stream=True, + ) + async for _ in response: + pass + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[{"content": "Can you recite the alphabet?", "role": "user"}], + output_messages=[ + {"content": "".join(chunk["text"] for chunk in MOCK_COMPLETION_STREAM_CHUNKS), "role": "model"} + ], + metadata={"temperature": 1.0, "max_output_tokens": 60}, + token_metrics={"input_tokens": 6, "output_tokens": 52, "total_tokens": 58}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + def test_completion_tool_call(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_TOOL_CALL)) + llm = genai.GenerativeModel("gemini-1.5-flash", tools=[set_light_values]) + llm.generate_content( + "Dim the lights so the room feels cozy and warm.", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], + output_messages=[ + {"content": "", "role": "model", "tool_calls": [{"name": "set_light_values", "arguments": {"fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}}]} + ], + metadata={"temperature": 1.0, "max_output_tokens": 30}, + token_metrics={"input_tokens": 150, "output_tokens": 25, "total_tokens": 175}, + tags={"ml_app": ""}, + integration="gemini" + 
) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + async def test_completion_tool_call_async(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer): + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_TOOL_CALL)) + llm = genai.GenerativeModel("gemini-1.5-flash", tools=[set_light_values]) + await llm.generate_content_async( + "Dim the lights so the room feels cozy and warm.", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], + output_messages=[ + {"content": "", "role": "model", "tool_calls": [{"name": "set_light_values", "arguments": {"fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}}]} + ], + metadata={"temperature": 1.0, "max_output_tokens": 30}, + token_metrics={"input_tokens": 150, "output_tokens": 25, "total_tokens": 175}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + def test_gemini_completion_tool_stream(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + mock_client.responses["stream_generate_content"] = [ + (_mock_completion_stream_chunk(chunk) for chunk in MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS) + ] + llm = genai.GenerativeModel("gemini-1.5-flash", tools=[set_light_values]) + response = llm.generate_content( + "Dim the lights so the room feels cozy and warm.", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + stream=True, + ) + for _ in response: + pass + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], + output_messages=[ + {"content": "", "role": "model", "tool_calls": [{"name": "set_light_values", "arguments": { + "fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}}]} + ], + metadata={"temperature": 1.0, "max_output_tokens": 30}, + token_metrics={"input_tokens": 150, "output_tokens": 25, "total_tokens": 175}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + async def test_gemini_completion_tool_stream_async(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer): + mock_client_async.responses["stream_generate_content"] = [ + _async_streamed_response(MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS) + ] + llm = genai.GenerativeModel("gemini-1.5-flash", tools=[set_light_values]) + response = await llm.generate_content_async( + "Dim the lights so the room feels cozy and warm.", + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + stream=True, + ) + async for _ in response: + pass + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = 
_expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], + output_messages=[ + {"content": "", "role": "model", "tool_calls": [{"name": "set_light_values", "arguments": { + "fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}}]} + ], + metadata={"temperature": 1.0, "max_output_tokens": 30}, + token_metrics={"input_tokens": 150, "output_tokens": 25, "total_tokens": 175}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + def test_gemini_completion_image(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + """Ensure passing images to generate_content() won't break patching.""" + img = Image.open(os.path.join(os.path.dirname(__file__), "test_data/apple.jpg")) + mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) + llm = genai.GenerativeModel("gemini-1.5-flash") + llm.generate_content( + [img, "Return a bounding box for the apple. \n [ymin, xmin, ymax, xmax]"], + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[ + {"content": "[Non-text content object: {}]".format(repr(img)), "role": "user"}, + {"content": "Return a bounding box for the apple. \n [ymin, xmin, ymax, xmax]", "role": "user"}, + ], + output_messages=[{"content": "57 100 900 911", "role": "model"}], + metadata={"temperature": 1.0, "max_output_tokens": 30}, + token_metrics={"input_tokens": 277, "output_tokens": 14, "total_tokens": 291}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) + + async def test_gemini_completion_image_async(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer): + """Ensure passing images to generate_content() won't break patching.""" + img = Image.open(os.path.join(os.path.dirname(__file__), "test_data/apple.jpg")) + mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) + llm = genai.GenerativeModel("gemini-1.5-flash") + await llm.generate_content_async( + [img, "Return a bounding box for the apple. \n [ymin, xmin, ymax, xmax]"], + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), + ) + span = mock_tracer.pop_traces()[0][0] + assert mock_llmobs_writer.enqueue.call_count == 1 + expected_llmobs_span_event = _expected_llmobs_llm_span_event( + span, + model_name="gemini-1.5-flash", + model_provider="google", + input_messages=[ + {"content": "[Non-text content object: {}]".format(repr(img)), "role": "user"}, + {"content": "Return a bounding box for the apple. 
\n [ymin, xmin, ymax, xmax]", "role": "user"}, + ], + output_messages=[{"content": "57 100 900 911", "role": "model"}], + metadata={"temperature": 1.0, "max_output_tokens": 30}, + token_metrics={"input_tokens": 277, "output_tokens": 14, "total_tokens": 291}, + tags={"ml_app": ""}, + integration="gemini" + ) + mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) diff --git a/tests/llmobs/_utils.py b/tests/llmobs/_utils.py index e8cc03d6ee7..ffb8aa303df 100644 --- a/tests/llmobs/_utils.py +++ b/tests/llmobs/_utils.py @@ -1,21 +1,27 @@ import os import mock -import vcr +try: + import vcr +except ImportError: + vcr = None import ddtrace from ddtrace._trace.span import Span from ddtrace.ext import SpanTypes -logs_vcr = vcr.VCR( - cassette_library_dir=os.path.join(os.path.dirname(__file__), "llmobs_cassettes/"), - record_mode="once", - match_on=["path"], - filter_headers=[("DD-API-KEY", "XXXXXX")], - # Ignore requests to the agent - ignore_localhost=True, -) +if vcr: + logs_vcr = vcr.VCR( + cassette_library_dir=os.path.join(os.path.dirname(__file__), "llmobs_cassettes/"), + record_mode="once", + match_on=["path"], + filter_headers=[("DD-API-KEY", "XXXXXX")], + # Ignore requests to the agent + ignore_localhost=True, + ) +else: + logs_vcr = None def _expected_llmobs_tags(span, error=None, tags=None, session_id=None): @@ -180,7 +186,7 @@ def _llmobs_base_span_event( integration=None, ): span_name = span.name - if integration == "langchain": + if integration in ("langchain", "gemini"): span_name = span.resource elif integration == "openai": span_name = "openai.{}".format(span.resource) diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json index a641ad0225a..0c8a82a3d5d 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json @@ -13,7 +13,7 @@ "_dd.p.tid": "66df5b6b00000000", "genai.request.api_key": "...key>", "genai.request.contents.0.text": "[Non-text content object: ]", - "genai.request.contents.1.text": "Return a bounding box for the piranha. \\n [ymin, xmin, ymax, xmax", + "genai.request.contents.1.text": "Return a bounding box for the apple. 
\\n [ymin, xmin, ymax, xmax]", "genai.request.generation_config.candidate_count": "None", "genai.request.generation_config.max_output_tokens": "30", "genai.request.generation_config.response_mime_type": "None", From 91f8ee37ab36f114388bbef7923d5ff36fa1750d Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 18:00:50 -0400 Subject: [PATCH 21/32] Address comments, fix snapshots --- .riot/requirements/{1736f46.txt => 1e15a25.txt} | 14 +++++++------- .riot/requirements/{1025297.txt => 1f54e6b.txt} | 14 +++++++------- .riot/requirements/{9b83479.txt => e8247d6.txt} | 14 +++++++------- .riot/requirements/{1b72277.txt => ebe4ea5.txt} | 14 +++++++------- ddtrace/contrib/google_generativeai/__init__.py | 2 +- .../internal/google_generativeai/_utils.py | 2 -- .../contrib/internal/google_generativeai/patch.py | 3 +++ ddtrace/llmobs/_integrations/gemini.py | 15 ++++++++++++++- .../feat-google-gemini-d5ee30b1d711bc08.yaml | 2 +- riotfile.py | 2 +- .../test_google_generativeai.py | 4 ++-- ...generativeai.test_gemini_completion_image.json | 2 +- 12 files changed, 51 insertions(+), 37 deletions(-) rename .riot/requirements/{1736f46.txt => 1e15a25.txt} (83%) rename .riot/requirements/{1025297.txt => 1f54e6b.txt} (84%) rename .riot/requirements/{9b83479.txt => e8247d6.txt} (83%) rename .riot/requirements/{1b72277.txt => ebe4ea5.txt} (84%) diff --git a/.riot/requirements/1736f46.txt b/.riot/requirements/1e15a25.txt similarity index 83% rename from .riot/requirements/1736f46.txt rename to .riot/requirements/1e15a25.txt index 0ff3f25b45c..36405478a02 100644 --- a/.riot/requirements/1736f46.txt +++ b/.riot/requirements/1e15a25.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --allow-unsafe --no-annotate .riot/requirements/1736f46.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e15a25.in # annotated-types==0.7.0 attrs==24.2.0 @@ -10,15 +10,15 @@ cachetools==5.5.0 certifi==2024.8.30 charset-normalizer==3.3.2 coverage[toml]==7.6.1 -google-ai-generativelanguage==0.6.6 +google-ai-generativelanguage==0.6.9 google-api-core[grpc]==2.19.2 -google-api-python-client==2.144.0 +google-api-python-client==2.145.0 google-auth==2.34.0 google-auth-httplib2==0.2.0 -google-generativeai==0.7.2 +google-generativeai==0.8.0 googleapis-common-protos==1.65.0 grpcio==1.66.1 -grpcio-status==1.62.3 +grpcio-status==1.66.1 httplib2==0.22.0 hypothesis==6.45.0 idna==3.8 @@ -29,13 +29,13 @@ packaging==24.1 pillow==10.4.0 pluggy==1.5.0 proto-plus==1.24.0 -protobuf==4.25.4 +protobuf==5.28.0 pyasn1==0.6.0 pyasn1-modules==0.4.0 pydantic==2.9.1 pydantic-core==2.23.3 pyparsing==3.1.4 -pytest==8.3.2 +pytest==8.3.3 pytest-asyncio==0.24.0 pytest-cov==5.0.0 pytest-mock==3.14.0 diff --git a/.riot/requirements/1025297.txt b/.riot/requirements/1f54e6b.txt similarity index 84% rename from .riot/requirements/1025297.txt rename to .riot/requirements/1f54e6b.txt index 8f793f5b156..8bcc57eabff 100644 --- a/.riot/requirements/1025297.txt +++ b/.riot/requirements/1f54e6b.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --allow-unsafe --no-annotate .riot/requirements/1025297.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1f54e6b.in # annotated-types==0.7.0 attrs==24.2.0 @@ -11,15 +11,15 @@ certifi==2024.8.30 charset-normalizer==3.3.2 coverage[toml]==7.6.1 exceptiongroup==1.2.2 -google-ai-generativelanguage==0.6.6 +google-ai-generativelanguage==0.6.9 
google-api-core[grpc]==2.19.2 -google-api-python-client==2.144.0 +google-api-python-client==2.145.0 google-auth==2.34.0 google-auth-httplib2==0.2.0 -google-generativeai==0.7.2 +google-generativeai==0.8.0 googleapis-common-protos==1.65.0 grpcio==1.66.1 -grpcio-status==1.62.3 +grpcio-status==1.66.1 httplib2==0.22.0 hypothesis==6.45.0 idna==3.8 @@ -30,13 +30,13 @@ packaging==24.1 pillow==10.4.0 pluggy==1.5.0 proto-plus==1.24.0 -protobuf==4.25.4 +protobuf==5.28.0 pyasn1==0.6.0 pyasn1-modules==0.4.0 pydantic==2.9.1 pydantic-core==2.23.3 pyparsing==3.1.4 -pytest==8.3.2 +pytest==8.3.3 pytest-asyncio==0.24.0 pytest-cov==5.0.0 pytest-mock==3.14.0 diff --git a/.riot/requirements/9b83479.txt b/.riot/requirements/e8247d6.txt similarity index 83% rename from .riot/requirements/9b83479.txt rename to .riot/requirements/e8247d6.txt index 5b6bf3efb1e..2aad3bb1a89 100644 --- a/.riot/requirements/9b83479.txt +++ b/.riot/requirements/e8247d6.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.12 # by the following command: # -# pip-compile --allow-unsafe --no-annotate .riot/requirements/9b83479.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/e8247d6.in # annotated-types==0.7.0 attrs==24.2.0 @@ -10,15 +10,15 @@ cachetools==5.5.0 certifi==2024.8.30 charset-normalizer==3.3.2 coverage[toml]==7.6.1 -google-ai-generativelanguage==0.6.6 +google-ai-generativelanguage==0.6.9 google-api-core[grpc]==2.19.2 -google-api-python-client==2.144.0 +google-api-python-client==2.145.0 google-auth==2.34.0 google-auth-httplib2==0.2.0 -google-generativeai==0.7.2 +google-generativeai==0.8.0 googleapis-common-protos==1.65.0 grpcio==1.66.1 -grpcio-status==1.62.3 +grpcio-status==1.66.1 httplib2==0.22.0 hypothesis==6.45.0 idna==3.8 @@ -29,13 +29,13 @@ packaging==24.1 pillow==10.4.0 pluggy==1.5.0 proto-plus==1.24.0 -protobuf==4.25.4 +protobuf==5.28.0 pyasn1==0.6.0 pyasn1-modules==0.4.0 pydantic==2.9.1 pydantic-core==2.23.3 pyparsing==3.1.4 -pytest==8.3.2 +pytest==8.3.3 pytest-asyncio==0.24.0 pytest-cov==5.0.0 pytest-mock==3.14.0 diff --git a/.riot/requirements/1b72277.txt b/.riot/requirements/ebe4ea5.txt similarity index 84% rename from .riot/requirements/1b72277.txt rename to .riot/requirements/ebe4ea5.txt index d30449481ba..264c2960158 100644 --- a/.riot/requirements/1b72277.txt +++ b/.riot/requirements/ebe4ea5.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --allow-unsafe --no-annotate .riot/requirements/1b72277.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/ebe4ea5.in # annotated-types==0.7.0 attrs==24.2.0 @@ -11,15 +11,15 @@ certifi==2024.8.30 charset-normalizer==3.3.2 coverage[toml]==7.6.1 exceptiongroup==1.2.2 -google-ai-generativelanguage==0.6.6 +google-ai-generativelanguage==0.6.9 google-api-core[grpc]==2.19.2 -google-api-python-client==2.144.0 +google-api-python-client==2.145.0 google-auth==2.34.0 google-auth-httplib2==0.2.0 -google-generativeai==0.7.2 +google-generativeai==0.8.0 googleapis-common-protos==1.65.0 grpcio==1.66.1 -grpcio-status==1.62.3 +grpcio-status==1.66.1 httplib2==0.22.0 hypothesis==6.45.0 idna==3.8 @@ -30,13 +30,13 @@ packaging==24.1 pillow==10.4.0 pluggy==1.5.0 proto-plus==1.24.0 -protobuf==4.25.4 +protobuf==5.28.0 pyasn1==0.6.0 pyasn1-modules==0.4.0 pydantic==2.9.1 pydantic-core==2.23.3 pyparsing==3.1.4 -pytest==8.3.2 +pytest==8.3.3 pytest-asyncio==0.24.0 pytest-cov==5.0.0 pytest-mock==3.14.0 diff --git a/ddtrace/contrib/google_generativeai/__init__.py 
b/ddtrace/contrib/google_generativeai/__init__.py index 32fa860ea8c..15d90c697a3 100644 --- a/ddtrace/contrib/google_generativeai/__init__.py +++ b/ddtrace/contrib/google_generativeai/__init__.py @@ -76,7 +76,7 @@ from ddtrace import Pin, config Pin.override(genai, service="my-gemini-service") -""" # noqa: E501 +""" from ...internal.utils.importlib import require_modules diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py index efc83aa8c16..db23ab7f947 100644 --- a/ddtrace/contrib/internal/google_generativeai/_utils.py +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -168,8 +168,6 @@ def tag_request(span, integration, instance, args, kwargs): if stream: span.set_tag("genai.request.stream", True) - span.set_tag_str("genai.request.model", str(_extract_model_name(instance))) - if not integration.is_pc_sampled_span(span): return diff --git a/ddtrace/contrib/internal/google_generativeai/patch.py b/ddtrace/contrib/internal/google_generativeai/patch.py index 25b9fa945fb..1dc695f4ba0 100644 --- a/ddtrace/contrib/internal/google_generativeai/patch.py +++ b/ddtrace/contrib/internal/google_generativeai/patch.py @@ -7,6 +7,7 @@ from ddtrace.contrib.internal.google_generativeai._utils import TracedAsyncGenerateContentResponse from ddtrace.contrib.internal.google_generativeai._utils import TracedGenerateContentResponse from ddtrace.contrib.internal.google_generativeai._utils import _extract_api_key +from ddtrace.contrib.internal.google_generativeai._utils import _extract_model_name from ddtrace.contrib.internal.google_generativeai._utils import tag_request from ddtrace.contrib.internal.google_generativeai._utils import tag_response from ddtrace.contrib.trace_utils import unwrap @@ -41,6 +42,7 @@ def traced_generate(genai, pin, func, instance, args, kwargs): pin, "%s.%s" % (instance.__class__.__name__, func.__name__), provider="google", + model=_extract_model_name(instance), ) try: tag_request(span, integration, instance, args, kwargs) @@ -70,6 +72,7 @@ async def traced_agenerate(genai, pin, func, instance, args, kwargs): pin, "%s.%s" % (instance.__class__.__name__, func.__name__), provider="google", + model=_extract_model_name(instance), ) try: tag_request(span, integration, instance, args, kwargs) diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index 258c9e16fdb..16cf714f4c4 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -1,5 +1,18 @@ -from .base import BaseLLMIntegration +from typing import Any +from typing import Dict +from typing import Optional + +from ddtrace.llmobs._integrations.base import BaseLLMIntegration +from ddtrace import Span class GeminiIntegration(BaseLLMIntegration): _integration_name = "gemini" + + def _set_base_span_tags( + self, span: Span, provider: Optional[str] = None, model: Optional[str] = None, **kwargs: Dict[str, Any] + ) -> None: + if provider: + span.set_tag_str("genai.request.model", model) + if model: + span.set_tag_str("genai.request.provider", provider) diff --git a/releasenotes/notes/feat-google-gemini-d5ee30b1d711bc08.yaml b/releasenotes/notes/feat-google-gemini-d5ee30b1d711bc08.yaml index 7a0315be804..0a7e38a6fda 100644 --- a/releasenotes/notes/feat-google-gemini-d5ee30b1d711bc08.yaml +++ b/releasenotes/notes/feat-google-gemini-d5ee30b1d711bc08.yaml @@ -1,6 +1,6 @@ --- features: - | - google_generativeai: This introduces tracing support for Google Gemini API ``generate_content`` 
calls. + google_generativeai: Introduces tracing support for Google Gemini API ``generate_content`` calls. See `the docs `_ for more information. diff --git a/riotfile.py b/riotfile.py index fc3e6407ff8..c1e8e7980ad 100644 --- a/riotfile.py +++ b/riotfile.py @@ -2691,7 +2691,7 @@ def select_pys(min_version=MIN_PYTHON_VERSION, max_version=MAX_PYTHON_VERSION): pys=select_pys(min_version="3.9"), pkgs={ "pytest-asyncio": latest, - "google-generativeai": ["~=0.7.2"], + "google-generativeai": [latest], "pillow": latest, }, ), diff --git a/tests/contrib/google_generativeai/test_google_generativeai.py b/tests/contrib/google_generativeai/test_google_generativeai.py index 10baf181ca6..79490af9ec8 100644 --- a/tests/contrib/google_generativeai/test_google_generativeai.py +++ b/tests/contrib/google_generativeai/test_google_generativeai.py @@ -338,7 +338,7 @@ def test_gemini_completion_image(genai, mock_client): mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) llm = genai.GenerativeModel("gemini-1.5-flash") llm.generate_content( - [img, "Return a bounding box for the piranha. \n [ymin, xmin, ymax, xmax"], + [img, "Return a bounding box for the apple. \n [ymin, xmin, ymax, xmax]"], generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), ) @@ -353,6 +353,6 @@ async def test_gemini_completion_image_async(genai, mock_client_async): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) llm = genai.GenerativeModel("gemini-1.5-flash") await llm.generate_content_async( - [img, "Return a bounding box for the piranha. \n [ymin, xmin, ymax, xmax"], + [img, "Return a bounding box for the apple. \n [ymin, xmin, ymax, xmax]"], generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=30, temperature=1.0), ) diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json index a641ad0225a..0c8a82a3d5d 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json @@ -13,7 +13,7 @@ "_dd.p.tid": "66df5b6b00000000", "genai.request.api_key": "...key>", "genai.request.contents.0.text": "[Non-text content object: ]", - "genai.request.contents.1.text": "Return a bounding box for the piranha. \\n [ymin, xmin, ymax, xmax", + "genai.request.contents.1.text": "Return a bounding box for the apple. 
\\n [ymin, xmin, ymax, xmax]", "genai.request.generation_config.candidate_count": "None", "genai.request.generation_config.max_output_tokens": "30", "genai.request.generation_config.response_mime_type": "None", From f2c33312ddcbd2af548d1ca3fc844b501322cb14 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 18:07:03 -0400 Subject: [PATCH 22/32] fix snapshots --- .../google_generativeai/test_google_generativeai.py | 8 ++++---- ...i.test_google_generativeai.test_gemini_completion.json | 1 + ..._google_generativeai.test_gemini_completion_error.json | 1 + ..._google_generativeai.test_gemini_completion_image.json | 1 + ...rativeai.test_gemini_completion_multiple_messages.json | 1 + ...google_generativeai.test_gemini_completion_stream.json | 3 ++- ...generativeai.test_gemini_completion_system_prompt.json | 3 ++- ...e_generativeai.test_gemini_completion_tool_stream.json | 3 ++- ...gle_generativeai.test_gemini_tool_chat_completion.json | 6 ++++-- ...t_google_generativeai.test_gemini_tool_completion.json | 3 ++- 10 files changed, 20 insertions(+), 10 deletions(-) diff --git a/tests/contrib/google_generativeai/test_google_generativeai.py b/tests/contrib/google_generativeai/test_google_generativeai.py index 79490af9ec8..5581f3dd7cc 100644 --- a/tests/contrib/google_generativeai/test_google_generativeai.py +++ b/tests/contrib/google_generativeai/test_google_generativeai.py @@ -185,7 +185,7 @@ def test_gemini_completion_system_prompt(genai, mock_client): ) llm.generate_content( "What is the argument for LeBron James being the GOAT?", - generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=50, temperature=1.0), ) @@ -201,7 +201,7 @@ async def test_gemini_completion_system_prompt_async(genai, mock_client_async): ) await llm.generate_content_async( "What is the argument for LeBron James being the GOAT?", - generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=50, temperature=1.0), ) @@ -213,7 +213,7 @@ def test_gemini_completion_stream(genai, mock_client): llm = genai.GenerativeModel("gemini-1.5-flash") response = llm.generate_content( "Can you recite the alphabet?", - generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=60, temperature=1.0), stream=True, ) for _ in response: @@ -229,7 +229,7 @@ async def test_gemini_completion_stream_async(genai, mock_client_async): llm = genai.GenerativeModel("gemini-1.5-flash") response = await llm.generate_content_async( "Can you recite the alphabet?", - generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=60, temperature=1.0), stream=True, ) async for _ in response: diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json index 3dd7ab7c763..e9d853e2663 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json +++ 
b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json @@ -22,6 +22,7 @@ "genai.request.generation_config.top_k": "None", "genai.request.generation_config.top_p": "None", "genai.request.model": "gemini-1.5-flash", + "genai.request.provider": "google", "genai.response.candidates.0.content.parts.0.text": "The argument for LeBron James being the 'Greatest of All Time' (GOAT) is multifaceted and involves a variety of factors. Here's ...", "genai.response.candidates.0.content.role": "model", "genai.response.candidates.0.finish_reason": "2", diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json index 5959a2be298..816427df038 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json @@ -24,6 +24,7 @@ "genai.request.generation_config.top_k": "None", "genai.request.generation_config.top_p": "None", "genai.request.model": "gemini-1.5-flash", + "genai.request.provider": "google", "language": "python", "runtime-id": "c20f6ea1fd834b0094c087a8dd7550ec" }, diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json index 0c8a82a3d5d..bf57184f6b4 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json @@ -23,6 +23,7 @@ "genai.request.generation_config.top_k": "None", "genai.request.generation_config.top_p": "None", "genai.request.model": "gemini-1.5-flash", + "genai.request.provider": "google", "genai.response.candidates.0.content.parts.0.text": "57 100 900 911", "genai.response.candidates.0.content.role": "model", "genai.response.candidates.0.finish_reason": "2", diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json index f842fd1f8cd..68685e253d9 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json @@ -27,6 +27,7 @@ "genai.request.generation_config.top_k": "None", "genai.request.generation_config.top_p": "None", "genai.request.model": "gemini-1.5-flash", + "genai.request.provider": "google", "genai.response.candidates.0.content.parts.0.text": "The sky appears blue due to a phenomenon called **Rayleigh scattering**. 
\\nHere's how it works:* **Sunlight is made up of all co...", "genai.response.candidates.0.content.role": "model", "genai.response.candidates.0.finish_reason": "2", diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json index e01780c0b92..569fefef6fb 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json @@ -14,7 +14,7 @@ "genai.request.api_key": "...key>", "genai.request.contents.0.text": "Can you recite the alphabet?", "genai.request.generation_config.candidate_count": "None", - "genai.request.generation_config.max_output_tokens": "35", + "genai.request.generation_config.max_output_tokens": "60", "genai.request.generation_config.response_mime_type": "None", "genai.request.generation_config.response_schema": "None", "genai.request.generation_config.stop_sequences": "['x']", @@ -22,6 +22,7 @@ "genai.request.generation_config.top_k": "None", "genai.request.generation_config.top_p": "None", "genai.request.model": "gemini-1.5-flash", + "genai.request.provider": "google", "genai.request.stream": "True", "genai.response.candidates.0.content.parts.0.text": "A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z.\\n", "genai.response.candidates.0.content.role": "model", diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json index 973da5b3167..192b9f55902 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json @@ -14,7 +14,7 @@ "genai.request.api_key": "...key>", "genai.request.contents.0.text": "What is the argument for LeBron James being the GOAT?", "genai.request.generation_config.candidate_count": "None", - "genai.request.generation_config.max_output_tokens": "35", + "genai.request.generation_config.max_output_tokens": "50", "genai.request.generation_config.response_mime_type": "None", "genai.request.generation_config.response_schema": "None", "genai.request.generation_config.stop_sequences": "['x']", @@ -22,6 +22,7 @@ "genai.request.generation_config.top_k": "None", "genai.request.generation_config.top_p": "None", "genai.request.model": "gemini-1.5-flash", + "genai.request.provider": "google", "genai.request.system_instruction.0.text": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", "genai.response.candidates.0.content.parts.0.text": "Look, I respect LeBron James. He's a phenomenal player, an incredible athlete, and a great ambassador for the game. 
But when it ...", "genai.response.candidates.0.content.role": "model", diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json index 490f8fa6acf..1c1a87b345f 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json @@ -22,8 +22,9 @@ "genai.request.generation_config.top_k": "None", "genai.request.generation_config.top_p": "None", "genai.request.model": "gemini-1.5-flash", + "genai.request.provider": "google", "genai.request.stream": "True", - "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'key': 'color_temp', 'value': 'warm'}, {'key': 'brightness', 'value': 50.0}]}", + "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", "genai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", "genai.response.candidates.0.content.parts.0.text": "", "genai.response.candidates.0.content.role": "model", diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json index 777b57a1d9a..87928bd5dbc 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json @@ -15,7 +15,8 @@ "genai.request.contents.0.parts.0.text": "Dim the lights so the room feels cozy and warm.", "genai.request.contents.0.role": "user", "genai.request.model": "gemini-1.5-flash", - "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'key': 'color_temp', 'value': 'warm'}, {'key': 'brightness', 'value': 50.0}]}", + "genai.request.provider": "google", + "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", "genai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", "genai.response.candidates.0.content.parts.0.text": "", "genai.response.candidates.0.content.role": "model", @@ -52,7 +53,7 @@ "genai.request.api_key": "...key>", "genai.request.contents.0.parts.0.text": "Dim the lights so the room feels cozy and warm.", "genai.request.contents.0.role": "user", - "genai.request.contents.1.parts.0.function_call.args": "{'fields': [{'key': 'color_temp', 'value': 'warm'}, {'key': 'brightness', 'value': 50.0}]}", + "genai.request.contents.1.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", "genai.request.contents.1.parts.0.function_call.name": "set_light_values", "genai.request.contents.1.parts.0.text": "", "genai.request.contents.1.role": "model", @@ -61,6 +62,7 @@ "genai.request.contents.2.parts.0.text": "", "genai.request.contents.2.role": "user", "genai.request.model": "gemini-1.5-flash", + "genai.request.provider": "google", "genai.response.candidates.0.content.parts.0.text": "OK. 
I've dimmed the lights to 50% and set the color temperature to warm. How's that? \\n", "genai.response.candidates.0.content.role": "model", "genai.response.candidates.0.finish_reason": "2", diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json index 93c99f18d6f..16aabb989a1 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json @@ -22,7 +22,8 @@ "genai.request.generation_config.top_k": "None", "genai.request.generation_config.top_p": "None", "genai.request.model": "gemini-1.5-flash", - "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'key': 'color_temp', 'value': 'warm'}, {'key': 'brightness', 'value': 50.0}]}", + "genai.request.provider": "google", + "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", "genai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", "genai.response.candidates.0.content.parts.0.text": "", "genai.response.candidates.0.content.role": "model", From 99b13ac23f0194672d8ae781928da812ee582020 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 18:24:07 -0400 Subject: [PATCH 23/32] Refactor --- ddtrace/llmobs/_integrations/gemini.py | 74 +++++++++++++------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index f73f658a73c..2f0f830a94f 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -1,6 +1,7 @@ import json from typing import Any from typing import Dict +from typing import List from typing import Optional from ddtrace import Span @@ -17,22 +18,22 @@ from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations.base import BaseLLMIntegration from ddtrace.llmobs._utils import _unserializable_default_repr -from ddtrace.llmobs._integrations.base import BaseLLMIntegration -from ddtrace import Span class GeminiIntegration(BaseLLMIntegration): _integration_name = "gemini" def _set_base_span_tags( - self, span: Span, provider: Optional[str] = None, model: Optional[str] = None, **kwargs: Dict[str, Any] + self, span: Span, provider: Optional[str] = None, model: Optional[str] = None, **kwargs: Dict[str, object] ) -> None: if provider: span.set_tag_str("genai.request.model", model) if model: span.set_tag_str("genai.request.provider", provider) - def llmobs_set_tags(self, span: Span, args: Any, kwargs: Any, instance: Any, generations: Any = None) -> None: + def llmobs_set_tags( + self, span: Span, args: List[Any], kwargs: Dict[str, Any], instance: Any, generations: Any = None + ) -> None: if not self.llmobs_enabled: return @@ -72,10 +73,29 @@ def _llmobs_set_metadata(kwargs, instance): return metadata @staticmethod - def _extract_input_message(contents, system_instruction=None): + def _extract_message_from_part(part, role): + text = _get_attr(part, "text", "") + function_call = _get_attr(part, "function_call", None) + function_response = _get_attr(part, "function_response", None) + message = {"content": text, "role": role} + if function_call: + function_call_dict = 
function_call + if not isinstance(function_call, dict): + function_call_dict = type(function_call).to_dict(function_call) + message["tool_calls"] = [ + {"name": function_call_dict.get("name", ""), "arguments": function_call_dict.get("args", {})} + ] + if function_response: + function_response_dict = function_response + if not isinstance(function_response, dict): + function_response_dict = type(function_response).to_dict(function_response) + message["content"] = "[tool result: {}]".format(function_response_dict.get("response", "")) + return message + + def _extract_input_message(self, contents, system_instruction=None): messages = [] if system_instruction: - for idx, part in enumerate(system_instruction.parts): + for part in system_instruction.parts: messages.append({"content": part.text or "", "role": "system"}) if isinstance(contents, str): messages.append({"content": contents, "role": "user"}) @@ -84,50 +104,31 @@ def _extract_input_message(contents, system_instruction=None): messages.append({"content": contents.get("text", ""), "role": contents.get("role", "user")}) return messages elif not isinstance(contents, list): + messages.append({"content": "[Non-text content object: {}]".format(repr(contents)), "role": "user"}) return messages - for content_idx, content in enumerate(contents): + for content in contents: if isinstance(content, str): messages.append({"content": content, "role": "user"}) continue role = _get_attr(content, "role", "user") parts = _get_attr(content, "parts", []) - if not parts: + if not isinstance(parts, list): messages.append({"content": "[Non-text content object: {}]".format(repr(content)), "role": role}) for part in parts: - text = _get_attr(part, "text", "") - function_call = _get_attr(part, "function_call", None) - function_response = _get_attr(part, "function_response", None) - message = {"content": text, "role": role} - if function_call: - function_call_dict = type(function_call).to_dict(function_call) - message["tool_calls"] = [ - {"name": function_call_dict.get("name", ""), "arguments": function_call_dict.get("args", {})} - ] - if function_response: - function_response_dict = type(function_response).to_dict(function_response) - message["content"] = "[tool result: {}]".format(function_response_dict.get("response", "")) + message = self._extract_message_from_part(part, role) messages.append(message) return messages - @staticmethod - def _extract_output_message(generations): + def _extract_output_message(self, generations): output_messages = [] generations_dict = generations.to_dict() - for idx, candidate in enumerate(generations_dict.get("candidates", [])): + for candidate in generations_dict.get("candidates", []): content = candidate.get("content", {}) role = content.get("role", "model") parts = content.get("parts", []) - for part_idx, part in enumerate(parts): - text = part.get("text", "") - function_call = part.get("function_call", None) - if not function_call: - output_messages.append({"content": text, "role": role}) - continue - function_name = function_call.get("name", "") - function_args = function_call.get("args", {}) - output_messages.append( - {"content": text, "role": role, "tool_calls": [{"name": function_name, "arguments": function_args}]} - ) + for part in parts: + message = self._extract_message_from_part(part, role) + output_messages.append(message) return output_messages @staticmethod @@ -146,9 +147,8 @@ def _get_llmobs_metrics_tags(span): return usage -def _get_attr(o: Any, attr: str, default: Any): +def _get_attr(o: object, attr: str, 
default: object): # Convenience method to get an attribute from an object or dict if isinstance(o, dict): return o.get(attr, default) - else: - return getattr(o, attr, default) + return getattr(o, attr, default) From 62d48499d8ccae86af656ee57759f32e04a503cd Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 18:29:59 -0400 Subject: [PATCH 24/32] fmt --- .../test_google_generativeai_llmobs.py | 165 +++++++++++++----- tests/llmobs/_utils.py | 1 + 2 files changed, 119 insertions(+), 47 deletions(-) diff --git a/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py b/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py index a293215b8d7..6b3e5a8fc9e 100644 --- a/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py +++ b/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py @@ -28,8 +28,7 @@ def test_completion(self, genai, ddtrace_global_config, mock_llmobs_writer, mock llm = genai.GenerativeModel("gemini-1.5-flash") llm.generate_content( "What is the argument for LeBron James being the GOAT?", - generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, - temperature=1.0), + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), ) span = mock_tracer.pop_traces()[0][0] assert mock_llmobs_writer.enqueue.call_count == 1 @@ -42,9 +41,9 @@ def test_completion(self, genai, ddtrace_global_config, mock_llmobs_writer, mock {"content": MOCK_COMPLETION_SIMPLE_1["candidates"][0]["content"]["parts"][0]["text"], "role": "model"}, ], metadata={"temperature": 1.0, "max_output_tokens": 35}, - token_metrics={"input_tokens": 12, "output_tokens": 30, "total_tokens": 42}, + token_metrics={"input_tokens": 12, "output_tokens": 30, "total_tokens": 42}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) @@ -55,8 +54,7 @@ async def test_completion_async( llm = genai.GenerativeModel("gemini-1.5-flash") await llm.generate_content_async( "What is the argument for LeBron James being the GOAT?", - generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, - temperature=1.0), + generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, temperature=1.0), ) span = mock_tracer.pop_traces()[0][0] assert mock_llmobs_writer.enqueue.call_count == 1 @@ -69,9 +67,9 @@ async def test_completion_async( {"content": MOCK_COMPLETION_SIMPLE_1["candidates"][0]["content"]["parts"][0]["text"], "role": "model"} ], metadata={"temperature": 1.0, "max_output_tokens": 35}, - token_metrics={"input_tokens": 12, "output_tokens": 30, "total_tokens": 42}, + token_metrics={"input_tokens": 12, "output_tokens": 30, "total_tokens": 42}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) @@ -82,8 +80,9 @@ def test_completion_error(self, genai, ddtrace_global_config, mock_llmobs_writer with pytest.raises(InvalidArgument): llm.generate_content( "What is the argument for LeBron James being the GOAT?", - generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, - temperature=1.0), + generation_config=genai.types.GenerationConfig( + stop_sequences=["x"], max_output_tokens=35, temperature=1.0 + ), ) span = mock_tracer.pop_traces()[0][0] assert mock_llmobs_writer.enqueue.call_count == 1 @@ -99,7 +98,7 @@ def 
test_completion_error(self, genai, ddtrace_global_config, mock_llmobs_writer error_stack=span.get_tag("error.stack"), metadata={"temperature": 1.0, "max_output_tokens": 35}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) ) @@ -109,12 +108,14 @@ async def test_completion_error_async( llm = genai.GenerativeModel("gemini-1.5-flash") llm._async_client = mock.Mock() llm._async_client.generate_content.side_effect = InvalidArgument( - "Invalid API key. Please pass a valid API key.") + "Invalid API key. Please pass a valid API key." + ) with pytest.raises(InvalidArgument): await llm.generate_content_async( "What is the argument for LeBron James being the GOAT?", - generation_config=genai.types.GenerationConfig(stop_sequences=["x"], max_output_tokens=35, - temperature=1.0), + generation_config=genai.types.GenerationConfig( + stop_sequences=["x"], max_output_tokens=35, temperature=1.0 + ), ) span = mock_tracer.pop_traces()[0][0] assert mock_llmobs_writer.enqueue.call_count == 1 @@ -130,11 +131,13 @@ async def test_completion_error_async( error_stack=span.get_tag("error.stack"), metadata={"temperature": 1.0, "max_output_tokens": 35}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) ) - def test_completion_multiple_messages(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + def test_completion_multiple_messages( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer + ): mock_client.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) llm = genai.GenerativeModel("gemini-1.5-flash") llm.generate_content( @@ -162,7 +165,7 @@ def test_completion_multiple_messages(self, genai, ddtrace_global_config, mock_l metadata={"temperature": 1.0, "max_output_tokens": 35}, token_metrics={"input_tokens": 24, "output_tokens": 35, "total_tokens": 59}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) @@ -196,7 +199,7 @@ async def test_completion_multiple_messages_async( metadata={"temperature": 1.0, "max_output_tokens": 35}, token_metrics={"input_tokens": 24, "output_tokens": 35, "total_tokens": 59}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) @@ -230,11 +233,13 @@ def test_chat_completion(self, genai, ddtrace_global_config, mock_llmobs_writer, metadata={"temperature": 1.0, "max_output_tokens": 35}, token_metrics={"input_tokens": 24, "output_tokens": 35, "total_tokens": 59}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) - async def test_chat_completion_async(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer): + async def test_chat_completion_async( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer + ): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_2)) llm = genai.GenerativeModel("gemini-1.5-flash") chat = llm.start_chat( @@ -264,7 +269,7 @@ async def test_chat_completion_async(self, genai, ddtrace_global_config, mock_ll metadata={"temperature": 1.0, "max_output_tokens": 35}, token_metrics={"input_tokens": 24, "output_tokens": 35, "total_tokens": 59}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) 
mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) @@ -285,20 +290,28 @@ def test_completion_system_prompt(self, genai, ddtrace_global_config, mock_llmob model_name="gemini-1.5-flash", model_provider="google", input_messages=[ - {"content": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", "role": "system"}, + { + "content": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", + "role": "system", + }, {"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}, ], output_messages=[ - {"content": MOCK_COMPLETION_SIMPLE_SYSTEM["candidates"][0]["content"]["parts"][0]["text"], "role": "model"} + { + "content": MOCK_COMPLETION_SIMPLE_SYSTEM["candidates"][0]["content"]["parts"][0]["text"], + "role": "model", + } ], metadata={"temperature": 1.0, "max_output_tokens": 50}, token_metrics={"input_tokens": 29, "output_tokens": 45, "total_tokens": 74}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) - async def test_completion_system_prompt_async(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer): + async def test_completion_system_prompt_async( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer + ): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_SIMPLE_SYSTEM)) llm = genai.GenerativeModel( "gemini-1.5-flash", @@ -315,16 +328,22 @@ async def test_completion_system_prompt_async(self, genai, ddtrace_global_config model_name="gemini-1.5-flash", model_provider="google", input_messages=[ - {"content": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", "role": "system"}, + { + "content": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", + "role": "system", + }, {"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}, ], output_messages=[ - {"content": MOCK_COMPLETION_SIMPLE_SYSTEM["candidates"][0]["content"]["parts"][0]["text"], "role": "model"}, + { + "content": MOCK_COMPLETION_SIMPLE_SYSTEM["candidates"][0]["content"]["parts"][0]["text"], + "role": "model", + }, ], metadata={"temperature": 1.0, "max_output_tokens": 50}, token_metrics={"input_tokens": 29, "output_tokens": 45, "total_tokens": 74}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) @@ -353,14 +372,16 @@ def test_completion_stream(self, genai, ddtrace_global_config, mock_llmobs_write metadata={"temperature": 1.0, "max_output_tokens": 60}, token_metrics={"input_tokens": 6, "output_tokens": 52, "total_tokens": 58}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) async def test_completion_stream_async( self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer ): - mock_client_async.responses["stream_generate_content"] = [_async_streamed_response(MOCK_COMPLETION_STREAM_CHUNKS)] + mock_client_async.responses["stream_generate_content"] = [ + _async_streamed_response(MOCK_COMPLETION_STREAM_CHUNKS) + ] llm = genai.GenerativeModel("gemini-1.5-flash") response = await llm.generate_content_async( "Can you recite the alphabet?", @@ -382,7 +403,7 @@ async def test_completion_stream_async( metadata={"temperature": 
1.0, "max_output_tokens": 60}, token_metrics={"input_tokens": 6, "output_tokens": 52, "total_tokens": 58}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) @@ -401,16 +422,29 @@ def test_completion_tool_call(self, genai, ddtrace_global_config, mock_llmobs_wr model_provider="google", input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], output_messages=[ - {"content": "", "role": "model", "tool_calls": [{"name": "set_light_values", "arguments": {"fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}}]} + { + "content": "", + "role": "model", + "tool_calls": [ + { + "name": "set_light_values", + "arguments": { + "fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}] + }, + } + ], + } ], metadata={"temperature": 1.0, "max_output_tokens": 30}, token_metrics={"input_tokens": 150, "output_tokens": 25, "total_tokens": 175}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) - async def test_completion_tool_call_async(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer): + async def test_completion_tool_call_async( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer + ): mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_TOOL_CALL)) llm = genai.GenerativeModel("gemini-1.5-flash", tools=[set_light_values]) await llm.generate_content_async( @@ -425,16 +459,29 @@ async def test_completion_tool_call_async(self, genai, ddtrace_global_config, mo model_provider="google", input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], output_messages=[ - {"content": "", "role": "model", "tool_calls": [{"name": "set_light_values", "arguments": {"fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}}]} + { + "content": "", + "role": "model", + "tool_calls": [ + { + "name": "set_light_values", + "arguments": { + "fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}] + }, + } + ], + } ], metadata={"temperature": 1.0, "max_output_tokens": 30}, token_metrics={"input_tokens": 150, "output_tokens": 25, "total_tokens": 175}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) - def test_gemini_completion_tool_stream(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer): + def test_gemini_completion_tool_stream( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client, mock_tracer + ): mock_client.responses["stream_generate_content"] = [ (_mock_completion_stream_chunk(chunk) for chunk in MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS) ] @@ -454,17 +501,29 @@ def test_gemini_completion_tool_stream(self, genai, ddtrace_global_config, mock_ model_provider="google", input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], output_messages=[ - {"content": "", "role": "model", "tool_calls": [{"name": "set_light_values", "arguments": { - "fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}}]} + { + "content": "", + "role": "model", + "tool_calls": [ + { + "name": "set_light_values", + "arguments": { + "fields": [{"key": 
"color_temp", "value": "warm"}, {"key": "brightness", "value": 50}] + }, + } + ], + } ], metadata={"temperature": 1.0, "max_output_tokens": 30}, token_metrics={"input_tokens": 150, "output_tokens": 25, "total_tokens": 175}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) - async def test_gemini_completion_tool_stream_async(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer): + async def test_gemini_completion_tool_stream_async( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer + ): mock_client_async.responses["stream_generate_content"] = [ _async_streamed_response(MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS) ] @@ -484,13 +543,23 @@ async def test_gemini_completion_tool_stream_async(self, genai, ddtrace_global_c model_provider="google", input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], output_messages=[ - {"content": "", "role": "model", "tool_calls": [{"name": "set_light_values", "arguments": { - "fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}]}}]} + { + "content": "", + "role": "model", + "tool_calls": [ + { + "name": "set_light_values", + "arguments": { + "fields": [{"key": "color_temp", "value": "warm"}, {"key": "brightness", "value": 50}] + }, + } + ], + } ], metadata={"temperature": 1.0, "max_output_tokens": 30}, token_metrics={"input_tokens": 150, "output_tokens": 25, "total_tokens": 175}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) @@ -517,11 +586,13 @@ def test_gemini_completion_image(self, genai, ddtrace_global_config, mock_llmobs metadata={"temperature": 1.0, "max_output_tokens": 30}, token_metrics={"input_tokens": 277, "output_tokens": 14, "total_tokens": 291}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) - async def test_gemini_completion_image_async(self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer): + async def test_gemini_completion_image_async( + self, genai, ddtrace_global_config, mock_llmobs_writer, mock_client_async, mock_tracer + ): """Ensure passing images to generate_content() won't break patching.""" img = Image.open(os.path.join(os.path.dirname(__file__), "test_data/apple.jpg")) mock_client_async.responses["generate_content"].append(_mock_completion_response(MOCK_COMPLETION_IMG_CALL)) @@ -544,6 +615,6 @@ async def test_gemini_completion_image_async(self, genai, ddtrace_global_config, metadata={"temperature": 1.0, "max_output_tokens": 30}, token_metrics={"input_tokens": 277, "output_tokens": 14, "total_tokens": 291}, tags={"ml_app": ""}, - integration="gemini" + integration="gemini", ) mock_llmobs_writer.enqueue.assert_called_with(expected_llmobs_span_event) diff --git a/tests/llmobs/_utils.py b/tests/llmobs/_utils.py index ffb8aa303df..1f64f64c28e 100644 --- a/tests/llmobs/_utils.py +++ b/tests/llmobs/_utils.py @@ -1,6 +1,7 @@ import os import mock + try: import vcr except ImportError: From f7455a9e99fac477aaf52c100e56fb5db2445ae8 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 18:36:42 -0400 Subject: [PATCH 25/32] fmt --- ddtrace/contrib/google_generativeai/__init__.py | 5 ++--- .../test_google_generativeai_llmobs.py | 12 ++++++------ tests/llmobs/_utils.py | 1 + 3 files 
changed, 9 insertions(+), 9 deletions(-) diff --git a/ddtrace/contrib/google_generativeai/__init__.py b/ddtrace/contrib/google_generativeai/__init__.py index 15d90c697a3..a91f58a07ce 100644 --- a/ddtrace/contrib/google_generativeai/__init__.py +++ b/ddtrace/contrib/google_generativeai/__init__.py @@ -76,9 +76,8 @@ from ddtrace import Pin, config Pin.override(genai, service="my-gemini-service") -""" - -from ...internal.utils.importlib import require_modules +""" # noqa: E501 +from ddtrace.internal.utils.importlib import require_modules required_modules = ["google.generativeai"] diff --git a/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py b/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py index 6b3e5a8fc9e..1cc60d05b70 100644 --- a/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py +++ b/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py @@ -1,22 +1,22 @@ import os -from PIL import Image from google.api_core.exceptions import InvalidArgument import mock +from PIL import Image import pytest -from tests.llmobs._utils import _expected_llmobs_llm_span_event -from tests.contrib.google_generativeai.utils import _mock_completion_response -from tests.contrib.google_generativeai.utils import _mock_completion_stream_chunk +from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_IMG_CALL from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_SIMPLE_1 from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_SIMPLE_2 from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_SIMPLE_SYSTEM from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_STREAM_CHUNKS -from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_TOOL_CALL -from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_IMG_CALL +from tests.contrib.google_generativeai.utils import MOCK_COMPLETION_TOOL_CALL_STREAM_CHUNKS from tests.contrib.google_generativeai.utils import _async_streamed_response +from tests.contrib.google_generativeai.utils import _mock_completion_response +from tests.contrib.google_generativeai.utils import _mock_completion_stream_chunk from tests.contrib.google_generativeai.utils import set_light_values +from tests.llmobs._utils import _expected_llmobs_llm_span_event @pytest.mark.parametrize( diff --git a/tests/llmobs/_utils.py b/tests/llmobs/_utils.py index 1f64f64c28e..c3f1853ac10 100644 --- a/tests/llmobs/_utils.py +++ b/tests/llmobs/_utils.py @@ -2,6 +2,7 @@ import mock + try: import vcr except ImportError: From da6b967d6ce2147994bb3ec89edaa7511a89aab9 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Tue, 10 Sep 2024 18:55:36 -0400 Subject: [PATCH 26/32] typing --- ddtrace/llmobs/_integrations/gemini.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index 2f0f830a94f..80008b946f6 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -26,10 +26,10 @@ class GeminiIntegration(BaseLLMIntegration): def _set_base_span_tags( self, span: Span, provider: Optional[str] = None, model: Optional[str] = None, **kwargs: Dict[str, object] ) -> None: - if provider: - span.set_tag_str("genai.request.model", model) - if model: - span.set_tag_str("genai.request.provider", provider) + if provider is not None: + span.set_tag_str("genai.request.model", 
str(model)) + if model is not None: + span.set_tag_str("genai.request.provider", str(provider)) def llmobs_set_tags( self, span: Span, args: List[Any], kwargs: Dict[str, Any], instance: Any, generations: Any = None From d7b73bb302f0e5735db63bfea920f93840cc39c7 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Wed, 11 Sep 2024 14:00:10 -0400 Subject: [PATCH 27/32] fmt, address PR comments --- .../contrib/google_generativeai/__init__.py | 6 +- .../contrib/internal/anthropic/_streaming.py | 2 +- ddtrace/contrib/internal/anthropic/patch.py | 2 +- ddtrace/contrib/internal/anthropic/utils.py | 2 +- .../internal/google_generativeai/_utils.py | 80 ++++++++++-------- .../internal/google_generativeai/patch.py | 6 +- ddtrace/llmobs/_integrations/anthropic.py | 9 +- ddtrace/llmobs/_integrations/gemini.py | 4 +- ddtrace/llmobs/_utils.py | 7 ++ .../test_google_generativeai.py | 28 +++---- ...e_generativeai.test_gemini_completion.json | 41 ++++----- ...rativeai.test_gemini_completion_error.json | 29 +++---- ...rativeai.test_gemini_completion_image.json | 43 +++++----- ...t_gemini_completion_multiple_messages.json | 51 +++++------ ...ativeai.test_gemini_completion_stream.json | 43 +++++----- ....test_gemini_completion_system_prompt.json | 43 +++++----- ...ai.test_gemini_completion_tool_stream.json | 47 +++++------ ...veai.test_gemini_tool_chat_completion.json | 84 +++++++++---------- ...erativeai.test_gemini_tool_completion.json | 45 +++++----- 19 files changed, 271 insertions(+), 301 deletions(-) diff --git a/ddtrace/contrib/google_generativeai/__init__.py b/ddtrace/contrib/google_generativeai/__init__.py index 15d90c697a3..f838fc346d9 100644 --- a/ddtrace/contrib/google_generativeai/__init__.py +++ b/ddtrace/contrib/google_generativeai/__init__.py @@ -4,8 +4,8 @@ All traces submitted from the Gemini integration are tagged by: - ``service``, ``env``, ``version``: see the `Unified Service Tagging docs `_. -- ``genai.request.model``: Google model used in the request. -- ``genai.request.api_key``: Google Gemini API key used to make the request (obfuscated to match the Google AI Studio UI representation ``...XXXX`` where ``XXXX`` is the last 4 digits of the key). +- ``google_generativeai.request.model``: Google model used in the request. +- ``google_generativeai.request.api_key``: Google Gemini API key used to make the request (obfuscated to match the Google AI Studio UI representation ``...XXXX`` where ``XXXX`` is the last 4 digits of the key). (beta) Prompt and Completion Sampling @@ -35,7 +35,7 @@ The service name reported by default for Gemini requests. - Alternatively, you can set this option with the ``DD_SERVICE`` or ``DD_ANTHROPIC_SERVICE`` environment + Alternatively, you can set this option with the ``DD_SERVICE`` or ``DD_GOOGLE_GENERATIVEAI_SERVICE`` environment variables. 
Default: ``DD_SERVICE`` diff --git a/ddtrace/contrib/internal/anthropic/_streaming.py b/ddtrace/contrib/internal/anthropic/_streaming.py index d790bd00813..f79d4965d12 100644 --- a/ddtrace/contrib/internal/anthropic/_streaming.py +++ b/ddtrace/contrib/internal/anthropic/_streaming.py @@ -9,7 +9,7 @@ from ddtrace.contrib.internal.anthropic.utils import tag_tool_use_output_on_span from ddtrace.internal.logger import get_logger -from ddtrace.llmobs._integrations.anthropic import _get_attr +from ddtrace.llmobs._utils import _get_attr log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/anthropic/patch.py b/ddtrace/contrib/internal/anthropic/patch.py index ff6328ea81d..d53fe5bc509 100644 --- a/ddtrace/contrib/internal/anthropic/patch.py +++ b/ddtrace/contrib/internal/anthropic/patch.py @@ -17,7 +17,7 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.utils import get_argument_value from ddtrace.llmobs._integrations import AnthropicIntegration -from ddtrace.llmobs._integrations.anthropic import _get_attr +from ddtrace.llmobs._utils import _get_attr from ddtrace.pin import Pin diff --git a/ddtrace/contrib/internal/anthropic/utils.py b/ddtrace/contrib/internal/anthropic/utils.py index d55364e818d..4dafadbe39b 100644 --- a/ddtrace/contrib/internal/anthropic/utils.py +++ b/ddtrace/contrib/internal/anthropic/utils.py @@ -3,7 +3,7 @@ from typing import Optional from ddtrace.internal.logger import get_logger -from ddtrace.llmobs._integrations.anthropic import _get_attr +from ddtrace.llmobs._utils import _get_attr log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py index db23ab7f947..e671ceb7f6e 100644 --- a/ddtrace/contrib/internal/google_generativeai/_utils.py +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -1,8 +1,11 @@ import sys import wrapt +from google.generativeai.types import GenerationConfigType +from google.generativeai.types.generation_types import to_generation_config_dict from ddtrace.internal.utils import get_argument_value +from ddtrace.llmobs._utils import _get_attr class BaseTracedGenerateContentResponse(wrapt.ObjectProxy): @@ -46,7 +49,10 @@ async def __aiter__(self): def _extract_model_name(instance): - """Extract the model name from the instance.""" + """Extract the model name from the instance. + The Google Gemini Python SDK stores model names in the format `"models/{model_name}"` + so we do our best to return the model name instead of the full string. 
+ """ model_name = getattr(instance, "model_name", "") if not model_name or not isinstance(model_name, str): return "" @@ -70,32 +76,30 @@ def _extract_api_key(instance): def _tag_request_content_part(span, integration, part, part_idx, content_idx): """Tag the generation span with request content parts.""" - text = getattr(part, "text", "") - function_call = getattr(part, "function_call", None) - function_response = getattr(part, "function_response", None) - if isinstance(part, dict): - text = part.get("text", "") - function_call = part.get("function_call", None) - function_response = part.get("function_response", None) - span.set_tag_str("genai.request.contents.%d.parts.%d.text" % (content_idx, part_idx), integration.trunc(str(text))) + text = _get_attr(part, "text", "") + function_call = _get_attr(part, "function_call", None) + function_response = _get_attr(part, "function_response", None) + span.set_tag_str( + "google_generativeai.request.contents.%d.parts.%d.text" % (content_idx, part_idx), integration.trunc(str(text)) + ) if function_call: function_call_dict = type(function_call).to_dict(function_call) span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_call.name" % (content_idx, part_idx), + "google_generativeai.request.contents.%d.parts.%d.function_call.name" % (content_idx, part_idx), integration.trunc(str(function_call_dict.get("name", ""))), ) span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_call.args" % (content_idx, part_idx), + "google_generativeai.request.contents.%d.parts.%d.function_call.args" % (content_idx, part_idx), integration.trunc(str(function_call_dict.get("args", {}))), ) if function_response: function_response_dict = type(function_response).to_dict(function_response) span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_response.name" % (content_idx, part_idx), + "google_generativeai.request.contents.%d.parts.%d.function_response.name" % (content_idx, part_idx), str(function_response_dict.get("name", "")), ) span.set_tag_str( - "genai.request.contents.%d.parts.%d.function_response.response" % (content_idx, part_idx), + "google_generativeai.request.contents.%d.parts.%d.function_response.response" % (content_idx, part_idx), integration.trunc(str(function_response_dict.get("response", {}))), ) @@ -103,23 +107,23 @@ def _tag_request_content_part(span, integration, part, part_idx, content_idx): def _tag_request_content(span, integration, content, content_idx): """Tag the generation span with request contents.""" if isinstance(content, str): - span.set_tag_str("genai.request.contents.%d.text" % content_idx, integration.trunc(content)) + span.set_tag_str("google_generativeai.request.contents.%d.text" % content_idx, integration.trunc(content)) return if isinstance(content, dict): role = content.get("role", "") if role: - span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(content.get("role", ""))) + span.set_tag_str("google_generativeai.request.contents.%d.role" % content_idx, str(content.get("role", ""))) parts = content.get("parts", []) for part_idx, part in enumerate(parts): _tag_request_content_part(span, integration, part, part_idx, content_idx) return role = getattr(content, "role", "") if role: - span.set_tag_str("genai.request.contents.%d.role" % content_idx, str(role)) + span.set_tag_str("google_generativeai.request.contents.%d.role" % content_idx, str(role)) parts = getattr(content, "parts", []) if not parts: span.set_tag_str( - "genai.request.contents.%d.text" % content_idx, + 
"google_generativeai.request.contents.%d.text" % content_idx, integration.trunc("[Non-text content object: {}]".format(repr(content))), ) return @@ -131,18 +135,18 @@ def _tag_response_part(span, integration, part, part_idx, candidate_idx): """Tag the generation span with response part text and function calls.""" text = part.get("text", "") span.set_tag_str( - "genai.response.candidates.%d.content.parts.%d.text" % (candidate_idx, part_idx), + "google_generativeai.response.candidates.%d.content.parts.%d.text" % (candidate_idx, part_idx), integration.trunc(str(text)), ) function_call = part.get("function_call", None) if not function_call: return span.set_tag_str( - "genai.response.candidates.%d.content.parts.%d.function_call.name" % (candidate_idx, part_idx), + "google_generativeai.response.candidates.%d.content.parts.%d.function_call.name" % (candidate_idx, part_idx), integration.trunc(str(function_call.get("name", ""))), ) span.set_tag_str( - "genai.response.candidates.%d.content.parts.%d.function_call.args" % (candidate_idx, part_idx), + "google_generativeai.response.candidates.%d.content.parts.%d.function_call.args" % (candidate_idx, part_idx), integration.trunc(str(function_call.get("args", {}))), ) @@ -157,29 +161,31 @@ def tag_request(span, integration, instance, args, kwargs): stream = kwargs.get("stream", None) generation_config_dict = None - if isinstance(generation_config, dict): - generation_config_dict = generation_config - elif generation_config is not None: - generation_config_dict = generation_config.__dict__ + try: + generation_config_dict = to_generation_config_dict(generation_config) + except TypeError: + pass if generation_config_dict is not None: for k, v in generation_config_dict.items(): - span.set_tag_str("genai.request.generation_config.%s" % k, str(v)) + span.set_tag_str("google_generativeai.request.generation_config.%s" % k, str(v)) if stream: - span.set_tag("genai.request.stream", True) + span.set_tag("google_generativeai.request.stream", True) if not integration.is_pc_sampled_span(span): return if system_instruction: for idx, part in enumerate(system_instruction.parts): - span.set_tag_str("genai.request.system_instruction.%d.text" % idx, integration.trunc(str(part.text))) + span.set_tag_str( + "google_generativeai.request.system_instruction.%d.text" % idx, integration.trunc(str(part.text)) + ) if isinstance(contents, str): - span.set_tag_str("genai.request.contents.0.text", integration.trunc(contents)) + span.set_tag_str("google_generativeai.request.contents.0.text", integration.trunc(contents)) return elif isinstance(contents, dict): - span.set_tag_str("genai.request.contents.0.text", integration.trunc(str(contents))) + span.set_tag_str("google_generativeai.request.contents.0.text", integration.trunc(str(contents))) return elif not isinstance(contents, list): return @@ -193,16 +199,18 @@ def tag_response(span, generations, integration, instance): """ api_key = _extract_api_key(instance) if api_key: - span.set_tag("genai.request.api_key", "...{}".format(api_key[-4:])) + span.set_tag("google_generativeai.request.api_key", "...{}".format(api_key[-4:])) generations_dict = generations.to_dict() for candidate_idx, candidate in enumerate(generations_dict.get("candidates", [])): finish_reason = candidate.get("finish_reason", None) if finish_reason: - span.set_tag_str("genai.response.candidates.%d.finish_reason" % candidate_idx, str(finish_reason)) + span.set_tag_str( + "google_generativeai.response.candidates.%d.finish_reason" % candidate_idx, str(finish_reason) + ) 
candidate_content = candidate.get("content", {}) role = candidate_content.get("role", "") - span.set_tag_str("genai.response.candidates.%d.content.role" % candidate_idx, str(role)) + span.set_tag_str("google_generativeai.response.candidates.%d.content.role" % candidate_idx, str(role)) if not integration.is_pc_sampled_span(span): continue parts = candidate_content.get("parts", []) @@ -212,6 +220,8 @@ def tag_response(span, generations, integration, instance): token_counts = generations_dict.get("usage_metadata", None) if not token_counts: return - span.set_metric("genai.response.usage.prompt_tokens", token_counts.get("prompt_token_count", 0)) - span.set_metric("genai.response.usage.completion_tokens", token_counts.get("candidates_token_count", 0)) - span.set_metric("genai.response.usage.total_tokens", token_counts.get("total_token_count", 0)) + span.set_metric("google_generativeai.response.usage.prompt_tokens", token_counts.get("prompt_token_count", 0)) + span.set_metric( + "google_generativeai.response.usage.completion_tokens", token_counts.get("candidates_token_count", 0) + ) + span.set_metric("google_generativeai.response.usage.total_tokens", token_counts.get("total_token_count", 0)) diff --git a/ddtrace/contrib/internal/google_generativeai/patch.py b/ddtrace/contrib/internal/google_generativeai/patch.py index 1dc695f4ba0..43e30e5834d 100644 --- a/ddtrace/contrib/internal/google_generativeai/patch.py +++ b/ddtrace/contrib/internal/google_generativeai/patch.py @@ -21,9 +21,9 @@ "genai", { "span_prompt_completion_sample_rate": float( - os.getenv("DD_GOOGLE_GENAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", 1.0) + os.getenv("DD_GOOGLE_GENERATIVEAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", 1.0) ), - "span_char_limit": int(os.getenv("DD_GOOGLE_GENAI_SPAN_CHAR_LIMIT", 128)), + "span_char_limit": int(os.getenv("DD_GOOGLE_GENERATIVEAI_SPAN_CHAR_LIMIT", 128)), }, ) @@ -49,7 +49,7 @@ def traced_generate(genai, pin, func, instance, args, kwargs): generations = func(*args, **kwargs) api_key = _extract_api_key(instance) if api_key: - span.set_tag("genai.request.api_key", "...{}".format(api_key[-4:])) + span.set_tag("google_generativeai.request.api_key", "...{}".format(api_key[-4:])) if stream: return TracedGenerateContentResponse(generations, instance, integration, span, args, kwargs) tag_response(span, generations, integration, instance) diff --git a/ddtrace/llmobs/_integrations/anthropic.py b/ddtrace/llmobs/_integrations/anthropic.py index 52c9344863c..b25c0f1b397 100644 --- a/ddtrace/llmobs/_integrations/anthropic.py +++ b/ddtrace/llmobs/_integrations/anthropic.py @@ -17,6 +17,7 @@ from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY from ddtrace.llmobs._constants import SPAN_KIND from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY +from ddtrace.llmobs._utils import _get_attr from .base import BaseLLMIntegration @@ -205,11 +206,3 @@ def _get_llmobs_metrics_tags(span): if total_tokens is not None: usage[TOTAL_TOKENS_METRIC_KEY] = total_tokens return usage - - -def _get_attr(o: Any, attr: str, default: Any): - # Since our response may be a dict or object, convenience method - if isinstance(o, dict): - return o.get(attr, default) - else: - return getattr(o, attr, default) diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index 16cf714f4c4..03a8ef8e03a 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -13,6 +13,6 @@ def _set_base_span_tags( self, span: Span, provider: Optional[str] = None, model: 
Optional[str] = None, **kwargs: Dict[str, Any] ) -> None: if provider: - span.set_tag_str("genai.request.model", model) + span.set_tag_str("google_generativeai.request.model", model) if model: - span.set_tag_str("genai.request.provider", provider) + span.set_tag_str("google_generativeai.request.provider", provider) diff --git a/ddtrace/llmobs/_utils.py b/ddtrace/llmobs/_utils.py index 7e7ff192b67..e317e15b5d8 100644 --- a/ddtrace/llmobs/_utils.py +++ b/ddtrace/llmobs/_utils.py @@ -16,6 +16,13 @@ log = get_logger(__name__) +def _get_attr(o: object, attr: str, default: object): + # Convenience method to get an attribute from an object or dict + if isinstance(o, dict): + return o.get(attr, default) + return getattr(o, attr, default) + + def _get_nearest_llmobs_ancestor(span: Span) -> Optional[Span]: """Return the nearest LLMObs-type ancestor span of a given span.""" parent = span._parent diff --git a/tests/contrib/google_generativeai/test_google_generativeai.py b/tests/contrib/google_generativeai/test_google_generativeai.py index 5581f3dd7cc..e28e88d61b1 100644 --- a/tests/contrib/google_generativeai/test_google_generativeai.py +++ b/tests/contrib/google_generativeai/test_google_generativeai.py @@ -40,8 +40,8 @@ def test_global_tags(genai, mock_client, mock_tracer): assert span.service == "test-svc" assert span.get_tag("env") == "staging" assert span.get_tag("version") == "1234" - assert span.get_tag("genai.request.model") == "gemini-1.5-flash" - assert span.get_tag("genai.request.api_key") == "...key>" + assert span.get_tag("google_generativeai.request.model") == "gemini-1.5-flash" + assert span.get_tag("google_generativeai.request.api_key") == "...key>" @pytest.mark.snapshot @@ -128,11 +128,11 @@ async def test_gemini_completion_multiple_messages_async(genai, mock_client_asyn @pytest.mark.snapshot( token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages", ignores=[ # send_message does not include all config options by default - "meta.genai.request.generation_config.candidate_count", - "meta.genai.request.generation_config.top_k", - "meta.genai.request.generation_config.top_p", - "meta.genai.request.generation_config.response_mime_type", - "meta.genai.request.generation_config.response_schema", + "meta.google_generativeai.request.generation_config.candidate_count", + "meta.google_generativeai.request.generation_config.top_k", + "meta.google_generativeai.request.generation_config.top_p", + "meta.google_generativeai.request.generation_config.response_mime_type", + "meta.google_generativeai.request.generation_config.response_schema", ], ) def test_gemini_chat_completion(genai, mock_client): @@ -154,11 +154,11 @@ def test_gemini_chat_completion(genai, mock_client): token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages", ignores=[ # send_message does not include all config options by default "resource", - "meta.genai.request.generation_config.candidate_count", - "meta.genai.request.generation_config.top_k", - "meta.genai.request.generation_config.top_p", - "meta.genai.request.generation_config.response_mime_type", - "meta.genai.request.generation_config.response_schema", + "meta.google_generativeai.request.generation_config.candidate_count", + "meta.google_generativeai.request.generation_config.top_k", + "meta.google_generativeai.request.generation_config.top_p", + "meta.google_generativeai.request.generation_config.response_mime_type", + 
"meta.google_generativeai.request.generation_config.response_schema", ], ) async def test_gemini_chat_completion_async(genai, mock_client_async): @@ -331,7 +331,7 @@ async def test_gemini_completion_tool_stream_async(genai, mock_client_async): pass -@pytest.mark.snapshot(ignores=["meta.genai.request.contents.0.text"]) +@pytest.mark.snapshot(ignores=["meta.google_generativeai.request.contents.0.text"]) def test_gemini_completion_image(genai, mock_client): """Ensure passing images to generate_content() won't break patching.""" img = Image.open(os.path.join(os.path.dirname(__file__), "test_data/apple.jpg")) @@ -345,7 +345,7 @@ def test_gemini_completion_image(genai, mock_client): @pytest.mark.snapshot( token="tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image", - ignores=["resource", "meta.genai.request.contents.0.text"], + ignores=["resource", "meta.google_generativeai.request.contents.0.text"], ) async def test_gemini_completion_image_async(genai, mock_client_async): """Ensure passing images to generate_content() won't break patching.""" diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json index e9d853e2663..a5ffadb4664 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion.json @@ -10,35 +10,30 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "66da21e900000000", - "genai.request.api_key": "...key>", - "genai.request.contents.0.text": "What is the argument for LeBron James being the GOAT?", - "genai.request.generation_config.candidate_count": "None", - "genai.request.generation_config.max_output_tokens": "35", - "genai.request.generation_config.response_mime_type": "None", - "genai.request.generation_config.response_schema": "None", - "genai.request.generation_config.stop_sequences": "['x']", - "genai.request.generation_config.temperature": "1.0", - "genai.request.generation_config.top_k": "None", - "genai.request.generation_config.top_p": "None", - "genai.request.model": "gemini-1.5-flash", - "genai.request.provider": "google", - "genai.response.candidates.0.content.parts.0.text": "The argument for LeBron James being the 'Greatest of All Time' (GOAT) is multifaceted and involves a variety of factors. Here's ...", - "genai.response.candidates.0.content.role": "model", - "genai.response.candidates.0.finish_reason": "2", + "_dd.p.tid": "66e1da0a00000000", + "google_generativeai.request.api_key": "...key>", + "google_generativeai.request.contents.0.text": "What is the argument for LeBron James being the GOAT?", + "google_generativeai.request.generation_config.max_output_tokens": "35", + "google_generativeai.request.generation_config.stop_sequences": "['x']", + "google_generativeai.request.generation_config.temperature": "1.0", + "google_generativeai.request.model": "gemini-1.5-flash", + "google_generativeai.request.provider": "google", + "google_generativeai.response.candidates.0.content.parts.0.text": "The argument for LeBron James being the 'Greatest of All Time' (GOAT) is multifaceted and involves a variety of factors. 
Here's ...", + "google_generativeai.response.candidates.0.content.role": "model", + "google_generativeai.response.candidates.0.finish_reason": "2", "language": "python", - "runtime-id": "f88d6bc3be7e40949d80435242c79aee" + "runtime-id": "bd8636b7d4bb4b3abc2d4bc7129ec109" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "genai.response.usage.completion_tokens": 30, - "genai.response.usage.prompt_tokens": 12, - "genai.response.usage.total_tokens": 42, - "process_id": 39040 + "google_generativeai.response.usage.completion_tokens": 30, + "google_generativeai.response.usage.prompt_tokens": 12, + "google_generativeai.response.usage.total_tokens": 42, + "process_id": 7954 }, - "duration": 579239000, - "start": 1725571561366580000 + "duration": 338000, + "start": 1726077450580144000 }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json index 816427df038..2425346e4aa 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_error.json @@ -10,31 +10,26 @@ "error": 1, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "66db429700000000", + "_dd.p.tid": "66e1da0a00000000", "error.message": "400 Invalid API key. Please pass a valid API key.", - "error.stack": "Traceback (most recent call last):\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/ddtrace/contrib/internal/google_generativeai/patch.py\", line 47, in traced_generate\n generations = func(*args, **kwargs)\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai~072/lib/python3.10/site-packages/google/generativeai/generative_models.py\", line 331, in generate_content\n response = self._client.generate_content(\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai~072/lib/python3.10/site-packages/mock/mock.py\", line 1178, in __call__\n return _mock_self._mock_call(*args, **kwargs)\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai~072/lib/python3.10/site-packages/mock/mock.py\", line 1182, in _mock_call\n return _mock_self._execute_mock_call(*args, **kwargs)\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai~072/lib/python3.10/site-packages/mock/mock.py\", line 1239, in _execute_mock_call\n raise effect\ngoogle.api_core.exceptions.InvalidArgument: 400 Invalid API key. 
Please pass a valid API key.\n", + "error.stack": "Traceback (most recent call last):\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/ddtrace/contrib/internal/google_generativeai/patch.py\", line 49, in traced_generate\n generations = func(*args, **kwargs)\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai_pillow/lib/python3.10/site-packages/google/generativeai/generative_models.py\", line 331, in generate_content\n response = self._client.generate_content(\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai_pillow/lib/python3.10/site-packages/mock/mock.py\", line 1178, in __call__\n return _mock_self._mock_call(*args, **kwargs)\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai_pillow/lib/python3.10/site-packages/mock/mock.py\", line 1182, in _mock_call\n return _mock_self._execute_mock_call(*args, **kwargs)\n File \"/Users/yun.kim/go/src/github.com/DataDog/dd-trace-py/.riot/venv_py3105_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_google-generativeai_pillow/lib/python3.10/site-packages/mock/mock.py\", line 1239, in _execute_mock_call\n raise effect\ngoogle.api_core.exceptions.InvalidArgument: 400 Invalid API key. Please pass a valid API key.\n", "error.type": "google.api_core.exceptions.InvalidArgument", - "genai.request.contents.0.text": "What is the argument for LeBron James being the GOAT?", - "genai.request.generation_config.candidate_count": "None", - "genai.request.generation_config.max_output_tokens": "30", - "genai.request.generation_config.response_mime_type": "None", - "genai.request.generation_config.response_schema": "None", - "genai.request.generation_config.stop_sequences": "['x']", - "genai.request.generation_config.temperature": "1.0", - "genai.request.generation_config.top_k": "None", - "genai.request.generation_config.top_p": "None", - "genai.request.model": "gemini-1.5-flash", - "genai.request.provider": "google", + "google_generativeai.request.contents.0.text": "What is the argument for LeBron James being the GOAT?", + "google_generativeai.request.generation_config.max_output_tokens": "30", + "google_generativeai.request.generation_config.stop_sequences": "['x']", + "google_generativeai.request.generation_config.temperature": "1.0", + "google_generativeai.request.model": "gemini-1.5-flash", + "google_generativeai.request.provider": "google", "language": "python", - "runtime-id": "c20f6ea1fd834b0094c087a8dd7550ec" + "runtime-id": "bd8636b7d4bb4b3abc2d4bc7129ec109" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 85884 + "process_id": 7954 }, - "duration": 2204000, - "start": 1725645463502274000 + "duration": 2178000, + "start": 1726077450617086000 }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json index bf57184f6b4..b1617104501 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json +++ 
b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_image.json @@ -10,36 +10,31 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "66df5b6b00000000", - "genai.request.api_key": "...key>", - "genai.request.contents.0.text": "[Non-text content object: ]", - "genai.request.contents.1.text": "Return a bounding box for the apple. \\n [ymin, xmin, ymax, xmax]", - "genai.request.generation_config.candidate_count": "None", - "genai.request.generation_config.max_output_tokens": "30", - "genai.request.generation_config.response_mime_type": "None", - "genai.request.generation_config.response_schema": "None", - "genai.request.generation_config.stop_sequences": "['x']", - "genai.request.generation_config.temperature": "1.0", - "genai.request.generation_config.top_k": "None", - "genai.request.generation_config.top_p": "None", - "genai.request.model": "gemini-1.5-flash", - "genai.request.provider": "google", - "genai.response.candidates.0.content.parts.0.text": "57 100 900 911", - "genai.response.candidates.0.content.role": "model", - "genai.response.candidates.0.finish_reason": "2", + "_dd.p.tid": "66e1da0a00000000", + "google_generativeai.request.api_key": "...key>", + "google_generativeai.request.contents.0.text": "[Non-text content object: ]", + "google_generativeai.request.contents.1.text": "Return a bounding box for the apple. \\n [ymin, xmin, ymax, xmax]", + "google_generativeai.request.generation_config.max_output_tokens": "30", + "google_generativeai.request.generation_config.stop_sequences": "['x']", + "google_generativeai.request.generation_config.temperature": "1.0", + "google_generativeai.request.model": "gemini-1.5-flash", + "google_generativeai.request.provider": "google", + "google_generativeai.response.candidates.0.content.parts.0.text": "57 100 900 911", + "google_generativeai.response.candidates.0.content.role": "model", + "google_generativeai.response.candidates.0.finish_reason": "2", "language": "python", - "runtime-id": "ef31792639f64e3a94a13cb358079ca3" + "runtime-id": "bd8636b7d4bb4b3abc2d4bc7129ec109" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "genai.response.usage.completion_tokens": 14, - "genai.response.usage.prompt_tokens": 277, - "genai.response.usage.total_tokens": 291, - "process_id": 20810 + "google_generativeai.response.usage.completion_tokens": 14, + "google_generativeai.response.usage.prompt_tokens": 277, + "google_generativeai.response.usage.total_tokens": 291, + "process_id": 7954 }, - "duration": 4446000, - "start": 1725913963608949000 + "duration": 6354000, + "start": 1726077450965991000 }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json index 68685e253d9..87fa8ebed7f 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_multiple_messages.json @@ -10,40 +10,35 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "66db5e4600000000", - "genai.request.api_key": "...key>", - "genai.request.contents.0.parts.0.text": "Hello world!", - "genai.request.contents.0.role": "user", - "genai.request.contents.1.parts.0.text": "Great to meet you. 
What would you like to know?", - "genai.request.contents.1.role": "model", - "genai.request.contents.2.parts.0.text": "Why is the sky blue?", - "genai.request.contents.2.role": "user", - "genai.request.generation_config.candidate_count": "None", - "genai.request.generation_config.max_output_tokens": "35", - "genai.request.generation_config.response_mime_type": "None", - "genai.request.generation_config.response_schema": "None", - "genai.request.generation_config.stop_sequences": "['x']", - "genai.request.generation_config.temperature": "1.0", - "genai.request.generation_config.top_k": "None", - "genai.request.generation_config.top_p": "None", - "genai.request.model": "gemini-1.5-flash", - "genai.request.provider": "google", - "genai.response.candidates.0.content.parts.0.text": "The sky appears blue due to a phenomenon called **Rayleigh scattering**. \\nHere's how it works:* **Sunlight is made up of all co...", - "genai.response.candidates.0.content.role": "model", - "genai.response.candidates.0.finish_reason": "2", + "_dd.p.tid": "66e1da0a00000000", + "google_generativeai.request.api_key": "...key>", + "google_generativeai.request.contents.0.parts.0.text": "Hello world!", + "google_generativeai.request.contents.0.role": "user", + "google_generativeai.request.contents.1.parts.0.text": "Great to meet you. What would you like to know?", + "google_generativeai.request.contents.1.role": "model", + "google_generativeai.request.contents.2.parts.0.text": "Why is the sky blue?", + "google_generativeai.request.contents.2.role": "user", + "google_generativeai.request.generation_config.max_output_tokens": "35", + "google_generativeai.request.generation_config.stop_sequences": "['x']", + "google_generativeai.request.generation_config.temperature": "1.0", + "google_generativeai.request.model": "gemini-1.5-flash", + "google_generativeai.request.provider": "google", + "google_generativeai.response.candidates.0.content.parts.0.text": "The sky appears blue due to a phenomenon called **Rayleigh scattering**. 
\\nHere's how it works:* **Sunlight is made up of all co...", + "google_generativeai.response.candidates.0.content.role": "model", + "google_generativeai.response.candidates.0.finish_reason": "2", "language": "python", - "runtime-id": "9efce87e47184fb8bec5228e67b84e90" + "runtime-id": "bd8636b7d4bb4b3abc2d4bc7129ec109" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "genai.response.usage.completion_tokens": 35, - "genai.response.usage.prompt_tokens": 24, - "genai.response.usage.total_tokens": 59, - "process_id": 13297 + "google_generativeai.response.usage.completion_tokens": 35, + "google_generativeai.response.usage.prompt_tokens": 24, + "google_generativeai.response.usage.total_tokens": 59, + "process_id": 7954 }, - "duration": 613000, - "start": 1725652550194481000 + "duration": 646000, + "start": 1726077450643372000 }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json index 569fefef6fb..73bab941858 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_stream.json @@ -10,36 +10,31 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "66df35a500000000", - "genai.request.api_key": "...key>", - "genai.request.contents.0.text": "Can you recite the alphabet?", - "genai.request.generation_config.candidate_count": "None", - "genai.request.generation_config.max_output_tokens": "60", - "genai.request.generation_config.response_mime_type": "None", - "genai.request.generation_config.response_schema": "None", - "genai.request.generation_config.stop_sequences": "['x']", - "genai.request.generation_config.temperature": "1.0", - "genai.request.generation_config.top_k": "None", - "genai.request.generation_config.top_p": "None", - "genai.request.model": "gemini-1.5-flash", - "genai.request.provider": "google", - "genai.request.stream": "True", - "genai.response.candidates.0.content.parts.0.text": "A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z.\\n", - "genai.response.candidates.0.content.role": "model", - "genai.response.candidates.0.finish_reason": "2", + "_dd.p.tid": "66e1da0a00000000", + "google_generativeai.request.api_key": "...key>", + "google_generativeai.request.contents.0.text": "Can you recite the alphabet?", + "google_generativeai.request.generation_config.max_output_tokens": "60", + "google_generativeai.request.generation_config.stop_sequences": "['x']", + "google_generativeai.request.generation_config.temperature": "1.0", + "google_generativeai.request.model": "gemini-1.5-flash", + "google_generativeai.request.provider": "google", + "google_generativeai.request.stream": "True", + "google_generativeai.response.candidates.0.content.parts.0.text": "A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z.\\n", + "google_generativeai.response.candidates.0.content.role": "model", + "google_generativeai.response.candidates.0.finish_reason": "2", "language": "python", - "runtime-id": "ab34a9e677524b1b85ca63038b94e284" + "runtime-id": "bd8636b7d4bb4b3abc2d4bc7129ec109" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "genai.response.usage.completion_tokens": 52, - 
"genai.response.usage.prompt_tokens": 6, - "genai.response.usage.total_tokens": 58, - "process_id": 85545 + "google_generativeai.response.usage.completion_tokens": 52, + "google_generativeai.response.usage.prompt_tokens": 6, + "google_generativeai.response.usage.total_tokens": 58, + "process_id": 7954 }, - "duration": 1269000, - "start": 1725904293506230000 + "duration": 1455000, + "start": 1726077450715754000 }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json index 192b9f55902..b9d97a50a97 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_system_prompt.json @@ -10,36 +10,31 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "66df2acb00000000", - "genai.request.api_key": "...key>", - "genai.request.contents.0.text": "What is the argument for LeBron James being the GOAT?", - "genai.request.generation_config.candidate_count": "None", - "genai.request.generation_config.max_output_tokens": "50", - "genai.request.generation_config.response_mime_type": "None", - "genai.request.generation_config.response_schema": "None", - "genai.request.generation_config.stop_sequences": "['x']", - "genai.request.generation_config.temperature": "1.0", - "genai.request.generation_config.top_k": "None", - "genai.request.generation_config.top_p": "None", - "genai.request.model": "gemini-1.5-flash", - "genai.request.provider": "google", - "genai.request.system_instruction.0.text": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", - "genai.response.candidates.0.content.parts.0.text": "Look, I respect LeBron James. He's a phenomenal player, an incredible athlete, and a great ambassador for the game. But when it ...", - "genai.response.candidates.0.content.role": "model", - "genai.response.candidates.0.finish_reason": "2", + "_dd.p.tid": "66e1da0a00000000", + "google_generativeai.request.api_key": "...key>", + "google_generativeai.request.contents.0.text": "What is the argument for LeBron James being the GOAT?", + "google_generativeai.request.generation_config.max_output_tokens": "50", + "google_generativeai.request.generation_config.stop_sequences": "['x']", + "google_generativeai.request.generation_config.temperature": "1.0", + "google_generativeai.request.model": "gemini-1.5-flash", + "google_generativeai.request.provider": "google", + "google_generativeai.request.system_instruction.0.text": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", + "google_generativeai.response.candidates.0.content.parts.0.text": "Look, I respect LeBron James. He's a phenomenal player, an incredible athlete, and a great ambassador for the game. 
But when it ...", + "google_generativeai.response.candidates.0.content.role": "model", + "google_generativeai.response.candidates.0.finish_reason": "2", "language": "python", - "runtime-id": "14742cbdf7694bc5b250790e5a9985f1" + "runtime-id": "bd8636b7d4bb4b3abc2d4bc7129ec109" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "genai.response.usage.completion_tokens": 45, - "genai.response.usage.prompt_tokens": 29, - "genai.response.usage.total_tokens": 74, - "process_id": 77455 + "google_generativeai.response.usage.completion_tokens": 45, + "google_generativeai.response.usage.prompt_tokens": 29, + "google_generativeai.response.usage.total_tokens": 74, + "process_id": 7954 }, - "duration": 339000, - "start": 1725901515291773000 + "duration": 383000, + "start": 1726077450691866000 }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json index 1c1a87b345f..7871efb1fa1 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_completion_tool_stream.json @@ -10,38 +10,33 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "66df431200000000", - "genai.request.api_key": "...key>", - "genai.request.contents.0.text": "Dim the lights so the room feels cozy and warm.", - "genai.request.generation_config.candidate_count": "None", - "genai.request.generation_config.max_output_tokens": "30", - "genai.request.generation_config.response_mime_type": "None", - "genai.request.generation_config.response_schema": "None", - "genai.request.generation_config.stop_sequences": "['x']", - "genai.request.generation_config.temperature": "1.0", - "genai.request.generation_config.top_k": "None", - "genai.request.generation_config.top_p": "None", - "genai.request.model": "gemini-1.5-flash", - "genai.request.provider": "google", - "genai.request.stream": "True", - "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", - "genai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", - "genai.response.candidates.0.content.parts.0.text": "", - "genai.response.candidates.0.content.role": "model", - "genai.response.candidates.0.finish_reason": "2", + "_dd.p.tid": "66e1da0a00000000", + "google_generativeai.request.api_key": "...key>", + "google_generativeai.request.contents.0.text": "Dim the lights so the room feels cozy and warm.", + "google_generativeai.request.generation_config.max_output_tokens": "30", + "google_generativeai.request.generation_config.stop_sequences": "['x']", + "google_generativeai.request.generation_config.temperature": "1.0", + "google_generativeai.request.model": "gemini-1.5-flash", + "google_generativeai.request.provider": "google", + "google_generativeai.request.stream": "True", + "google_generativeai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", + "google_generativeai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", + "google_generativeai.response.candidates.0.content.parts.0.text": "", + 
"google_generativeai.response.candidates.0.content.role": "model", + "google_generativeai.response.candidates.0.finish_reason": "2", "language": "python", - "runtime-id": "3188ecf703b5409ab3405dfd2c201aa6" + "runtime-id": "bd8636b7d4bb4b3abc2d4bc7129ec109" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "genai.response.usage.completion_tokens": 25, - "genai.response.usage.prompt_tokens": 150, - "genai.response.usage.total_tokens": 175, - "process_id": 96885 + "google_generativeai.response.usage.completion_tokens": 25, + "google_generativeai.response.usage.prompt_tokens": 150, + "google_generativeai.response.usage.total_tokens": 175, + "process_id": 7954 }, - "duration": 721000, - "start": 1725907730514245000 + "duration": 29811000, + "start": 1726077450897294000 }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json index 87928bd5dbc..08754d32b29 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_chat_completion.json @@ -10,32 +10,32 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "66db74d900000000", - "genai.request.api_key": "...key>", - "genai.request.contents.0.parts.0.text": "Dim the lights so the room feels cozy and warm.", - "genai.request.contents.0.role": "user", - "genai.request.model": "gemini-1.5-flash", - "genai.request.provider": "google", - "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", - "genai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", - "genai.response.candidates.0.content.parts.0.text": "", - "genai.response.candidates.0.content.role": "model", - "genai.response.candidates.0.finish_reason": "2", + "_dd.p.tid": "66e1da0a00000000", + "google_generativeai.request.api_key": "...key>", + "google_generativeai.request.contents.0.parts.0.text": "Dim the lights so the room feels cozy and warm.", + "google_generativeai.request.contents.0.role": "user", + "google_generativeai.request.model": "gemini-1.5-flash", + "google_generativeai.request.provider": "google", + "google_generativeai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", + "google_generativeai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", + "google_generativeai.response.candidates.0.content.parts.0.text": "", + "google_generativeai.response.candidates.0.content.role": "model", + "google_generativeai.response.candidates.0.finish_reason": "2", "language": "python", - "runtime-id": "22733330fbbe48118bd9b9d1d58d6ee4" + "runtime-id": "bd8636b7d4bb4b3abc2d4bc7129ec109" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "genai.response.usage.completion_tokens": 25, - "genai.response.usage.prompt_tokens": 150, - "genai.response.usage.total_tokens": 175, - "process_id": 35758 + "google_generativeai.response.usage.completion_tokens": 25, + "google_generativeai.response.usage.prompt_tokens": 150, + "google_generativeai.response.usage.total_tokens": 175, + "process_id": 
7954 }, - "duration": 569000, - "start": 1725658329842913000 + "duration": 346000, + "start": 1726077450866031000 }], [ { @@ -49,36 +49,36 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "66db74d900000000", - "genai.request.api_key": "...key>", - "genai.request.contents.0.parts.0.text": "Dim the lights so the room feels cozy and warm.", - "genai.request.contents.0.role": "user", - "genai.request.contents.1.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", - "genai.request.contents.1.parts.0.function_call.name": "set_light_values", - "genai.request.contents.1.parts.0.text": "", - "genai.request.contents.1.role": "model", - "genai.request.contents.2.parts.0.function_response.name": "set_light_values", - "genai.request.contents.2.parts.0.function_response.response": "{'result': {'color_temperature': 'warm', 'brightness': 50.0}}", - "genai.request.contents.2.parts.0.text": "", - "genai.request.contents.2.role": "user", - "genai.request.model": "gemini-1.5-flash", - "genai.request.provider": "google", - "genai.response.candidates.0.content.parts.0.text": "OK. I've dimmed the lights to 50% and set the color temperature to warm. How's that? \\n", - "genai.response.candidates.0.content.role": "model", - "genai.response.candidates.0.finish_reason": "2", + "_dd.p.tid": "66e1da0a00000000", + "google_generativeai.request.api_key": "...key>", + "google_generativeai.request.contents.0.parts.0.text": "Dim the lights so the room feels cozy and warm.", + "google_generativeai.request.contents.0.role": "user", + "google_generativeai.request.contents.1.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", + "google_generativeai.request.contents.1.parts.0.function_call.name": "set_light_values", + "google_generativeai.request.contents.1.parts.0.text": "", + "google_generativeai.request.contents.1.role": "model", + "google_generativeai.request.contents.2.parts.0.function_response.name": "set_light_values", + "google_generativeai.request.contents.2.parts.0.function_response.response": "{'result': {'color_temperature': 'warm', 'brightness': 50.0}}", + "google_generativeai.request.contents.2.parts.0.text": "", + "google_generativeai.request.contents.2.role": "user", + "google_generativeai.request.model": "gemini-1.5-flash", + "google_generativeai.request.provider": "google", + "google_generativeai.response.candidates.0.content.parts.0.text": "OK. I've dimmed the lights to 50% and set the color temperature to warm. How's that? 
\\n", + "google_generativeai.response.candidates.0.content.role": "model", + "google_generativeai.response.candidates.0.finish_reason": "2", "language": "python", - "runtime-id": "22733330fbbe48118bd9b9d1d58d6ee4" + "runtime-id": "bd8636b7d4bb4b3abc2d4bc7129ec109" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "genai.response.usage.completion_tokens": 27, - "genai.response.usage.prompt_tokens": 206, - "genai.response.usage.total_tokens": 233, - "process_id": 35758 + "google_generativeai.response.usage.completion_tokens": 27, + "google_generativeai.response.usage.prompt_tokens": 206, + "google_generativeai.response.usage.total_tokens": 233, + "process_id": 7954 }, - "duration": 397000, - "start": 1725658329845583000 + "duration": 419000, + "start": 1726077450866762000 }]] diff --git a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json index 16aabb989a1..f8ca90ac9f5 100644 --- a/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json +++ b/tests/snapshots/tests.contrib.google_generativeai.test_google_generativeai.test_gemini_tool_completion.json @@ -10,37 +10,32 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "66db701d00000000", - "genai.request.api_key": "...key>", - "genai.request.contents.0.text": "Dim the lights so the room feels cozy and warm.", - "genai.request.generation_config.candidate_count": "None", - "genai.request.generation_config.max_output_tokens": "30", - "genai.request.generation_config.response_mime_type": "None", - "genai.request.generation_config.response_schema": "None", - "genai.request.generation_config.stop_sequences": "['x']", - "genai.request.generation_config.temperature": "1.0", - "genai.request.generation_config.top_k": "None", - "genai.request.generation_config.top_p": "None", - "genai.request.model": "gemini-1.5-flash", - "genai.request.provider": "google", - "genai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", - "genai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", - "genai.response.candidates.0.content.parts.0.text": "", - "genai.response.candidates.0.content.role": "model", - "genai.response.candidates.0.finish_reason": "2", + "_dd.p.tid": "66e1da0a00000000", + "google_generativeai.request.api_key": "...key>", + "google_generativeai.request.contents.0.text": "Dim the lights so the room feels cozy and warm.", + "google_generativeai.request.generation_config.max_output_tokens": "30", + "google_generativeai.request.generation_config.stop_sequences": "['x']", + "google_generativeai.request.generation_config.temperature": "1.0", + "google_generativeai.request.model": "gemini-1.5-flash", + "google_generativeai.request.provider": "google", + "google_generativeai.response.candidates.0.content.parts.0.function_call.args": "{'fields': [{'value': 'warm', 'key': 'color_temp'}, {'value': 50.0, 'key': 'brightness'}]}", + "google_generativeai.response.candidates.0.content.parts.0.function_call.name": "set_light_values", + "google_generativeai.response.candidates.0.content.parts.0.text": "", + "google_generativeai.response.candidates.0.content.role": "model", + "google_generativeai.response.candidates.0.finish_reason": "2", "language": "python", - 
"runtime-id": "50c5d175cda64dabac9f8662880d7ca5" + "runtime-id": "bd8636b7d4bb4b3abc2d4bc7129ec109" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "genai.response.usage.completion_tokens": 25, - "genai.response.usage.prompt_tokens": 150, - "genai.response.usage.total_tokens": 175, - "process_id": 31686 + "google_generativeai.response.usage.completion_tokens": 25, + "google_generativeai.response.usage.prompt_tokens": 150, + "google_generativeai.response.usage.total_tokens": 175, + "process_id": 7954 }, - "duration": 627000, - "start": 1725657117233350000 + "duration": 418000, + "start": 1726077450837094000 }]] From 37830d40d9941fc3a4e4f9ceedb61950112b0000 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Wed, 11 Sep 2024 14:44:01 -0400 Subject: [PATCH 28/32] Do not assume default roles if not provided --- ddtrace/llmobs/_integrations/gemini.py | 44 ++++++++++++------- .../test_google_generativeai_llmobs.py | 32 +++++++------- tests/llmobs/_utils.py | 2 +- 3 files changed, 44 insertions(+), 34 deletions(-) diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index 80008b946f6..e55c270f28e 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -1,6 +1,7 @@ import json from typing import Any from typing import Dict +from typing import Iterable from typing import List from typing import Optional @@ -27,9 +28,9 @@ def _set_base_span_tags( self, span: Span, provider: Optional[str] = None, model: Optional[str] = None, **kwargs: Dict[str, object] ) -> None: if provider is not None: - span.set_tag_str("genai.request.model", str(model)) + span.set_tag_str("google_generativeai.request.model", str(model)) if model is not None: - span.set_tag_str("genai.request.provider", str(provider)) + span.set_tag_str("google_generativeai.request.provider", str(provider)) def llmobs_set_tags( self, span: Span, args: List[Any], kwargs: Dict[str, Any], instance: Any, generations: Any = None @@ -38,8 +39,8 @@ def llmobs_set_tags( return span.set_tag_str(SPAN_KIND, "llm") - span.set_tag_str(MODEL_NAME, span.get_tag("genai.request.model") or "") - span.set_tag_str(MODEL_PROVIDER, span.get_tag("genai.request.provider") or "") + span.set_tag_str(MODEL_NAME, span.get_tag("google_generativeai.request.model") or "") + span.set_tag_str(MODEL_PROVIDER, span.get_tag("google_generativeai.request.provider") or "") metadata = self._llmobs_set_metadata(kwargs, instance) span.set_tag_str(METADATA, json.dumps(metadata, default=_unserializable_default_repr)) @@ -77,7 +78,9 @@ def _extract_message_from_part(part, role): text = _get_attr(part, "text", "") function_call = _get_attr(part, "function_call", None) function_response = _get_attr(part, "function_response", None) - message = {"content": text, "role": role} + message = {"content": text} + if role: + message["role"] = role if function_call: function_call_dict = function_call if not isinstance(function_call, dict): @@ -98,22 +101,29 @@ def _extract_input_message(self, contents, system_instruction=None): for part in system_instruction.parts: messages.append({"content": part.text or "", "role": "system"}) if isinstance(contents, str): - messages.append({"content": contents, "role": "user"}) + messages.append({"content": contents}) return messages - elif isinstance(contents, dict): - messages.append({"content": contents.get("text", ""), "role": contents.get("role", "user")}) + if isinstance(contents, dict): + message = {"content": 
contents.get("text", "")} + if contents.get("role", None): + message["role"] = contents["role"] + messages.append(message) return messages - elif not isinstance(contents, list): - messages.append({"content": "[Non-text content object: {}]".format(repr(contents)), "role": "user"}) + if not isinstance(contents, list): + messages.append({"content": "[Non-text content object: {}]".format(repr(contents))}) return messages for content in contents: if isinstance(content, str): - messages.append({"content": content, "role": "user"}) + messages.append({"content": content}) continue - role = _get_attr(content, "role", "user") + role = _get_attr(content, "role", None) parts = _get_attr(content, "parts", []) - if not isinstance(parts, list): - messages.append({"content": "[Non-text content object: {}]".format(repr(content)), "role": role}) + if not parts or not isinstance(parts, Iterable): + message = {"content": "[Non-text content object: {}]".format(repr(content))} + if role: + message["role"] = role + messages.append(message) + continue for part in parts: message = self._extract_message_from_part(part, role) messages.append(message) @@ -134,9 +144,9 @@ def _extract_output_message(self, generations): @staticmethod def _get_llmobs_metrics_tags(span): usage = {} - input_tokens = span.get_metric("genai.response.usage.prompt_tokens") - output_tokens = span.get_metric("genai.response.usage.completion_tokens") - total_tokens = span.get_metric("genai.response.usage.total_tokens") + input_tokens = span.get_metric("google_generativeai.response.usage.prompt_tokens") + output_tokens = span.get_metric("google_generativeai.response.usage.completion_tokens") + total_tokens = span.get_metric("google_generativeai.response.usage.total_tokens") if input_tokens is not None: usage[INPUT_TOKENS_METRIC_KEY] = input_tokens diff --git a/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py b/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py index 1cc60d05b70..070ffd03d36 100644 --- a/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py +++ b/tests/contrib/google_generativeai/test_google_generativeai_llmobs.py @@ -36,7 +36,7 @@ def test_completion(self, genai, ddtrace_global_config, mock_llmobs_writer, mock span, model_name="gemini-1.5-flash", model_provider="google", - input_messages=[{"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}], + input_messages=[{"content": "What is the argument for LeBron James being the GOAT?"}], output_messages=[ {"content": MOCK_COMPLETION_SIMPLE_1["candidates"][0]["content"]["parts"][0]["text"], "role": "model"}, ], @@ -62,7 +62,7 @@ async def test_completion_async( span, model_name="gemini-1.5-flash", model_provider="google", - input_messages=[{"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}], + input_messages=[{"content": "What is the argument for LeBron James being the GOAT?"}], output_messages=[ {"content": MOCK_COMPLETION_SIMPLE_1["candidates"][0]["content"]["parts"][0]["text"], "role": "model"} ], @@ -91,7 +91,7 @@ def test_completion_error(self, genai, ddtrace_global_config, mock_llmobs_writer span, model_name="gemini-1.5-flash", model_provider="google", - input_messages=[{"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}], + input_messages=[{"content": "What is the argument for LeBron James being the GOAT?"}], output_messages=[{"content": ""}], error="google.api_core.exceptions.InvalidArgument", 
error_message=span.get_tag("error.message"), @@ -124,7 +124,7 @@ async def test_completion_error_async( span, model_name="gemini-1.5-flash", model_provider="google", - input_messages=[{"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}], + input_messages=[{"content": "What is the argument for LeBron James being the GOAT?"}], output_messages=[{"content": ""}], error="google.api_core.exceptions.InvalidArgument", error_message=span.get_tag("error.message"), @@ -294,7 +294,7 @@ def test_completion_system_prompt(self, genai, ddtrace_global_config, mock_llmob "content": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", "role": "system", }, - {"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}, + {"content": "What is the argument for LeBron James being the GOAT?"}, ], output_messages=[ { @@ -332,7 +332,7 @@ async def test_completion_system_prompt_async( "content": "You are a die-hard Michael Jordan fan that always brings stats to the discussion.", "role": "system", }, - {"content": "What is the argument for LeBron James being the GOAT?", "role": "user"}, + {"content": "What is the argument for LeBron James being the GOAT?"}, ], output_messages=[ { @@ -365,7 +365,7 @@ def test_completion_stream(self, genai, ddtrace_global_config, mock_llmobs_write span, model_name="gemini-1.5-flash", model_provider="google", - input_messages=[{"content": "Can you recite the alphabet?", "role": "user"}], + input_messages=[{"content": "Can you recite the alphabet?"}], output_messages=[ {"content": "".join(chunk["text"] for chunk in MOCK_COMPLETION_STREAM_CHUNKS), "role": "model"} ], @@ -396,7 +396,7 @@ async def test_completion_stream_async( span, model_name="gemini-1.5-flash", model_provider="google", - input_messages=[{"content": "Can you recite the alphabet?", "role": "user"}], + input_messages=[{"content": "Can you recite the alphabet?"}], output_messages=[ {"content": "".join(chunk["text"] for chunk in MOCK_COMPLETION_STREAM_CHUNKS), "role": "model"} ], @@ -420,7 +420,7 @@ def test_completion_tool_call(self, genai, ddtrace_global_config, mock_llmobs_wr span, model_name="gemini-1.5-flash", model_provider="google", - input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], + input_messages=[{"content": "Dim the lights so the room feels cozy and warm."}], output_messages=[ { "content": "", @@ -457,7 +457,7 @@ async def test_completion_tool_call_async( span, model_name="gemini-1.5-flash", model_provider="google", - input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], + input_messages=[{"content": "Dim the lights so the room feels cozy and warm."}], output_messages=[ { "content": "", @@ -499,7 +499,7 @@ def test_gemini_completion_tool_stream( span, model_name="gemini-1.5-flash", model_provider="google", - input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], + input_messages=[{"content": "Dim the lights so the room feels cozy and warm."}], output_messages=[ { "content": "", @@ -541,7 +541,7 @@ async def test_gemini_completion_tool_stream_async( span, model_name="gemini-1.5-flash", model_provider="google", - input_messages=[{"content": "Dim the lights so the room feels cozy and warm.", "role": "user"}], + input_messages=[{"content": "Dim the lights so the room feels cozy and warm."}], output_messages=[ { "content": "", @@ -579,8 +579,8 @@ def test_gemini_completion_image(self, genai, 
ddtrace_global_config, mock_llmobs model_name="gemini-1.5-flash", model_provider="google", input_messages=[ - {"content": "[Non-text content object: {}]".format(repr(img)), "role": "user"}, - {"content": "Return a bounding box for the apple. \n [ymin, xmin, ymax, xmax]", "role": "user"}, + {"content": "[Non-text content object: {}]".format(repr(img))}, + {"content": "Return a bounding box for the apple. \n [ymin, xmin, ymax, xmax]"}, ], output_messages=[{"content": "57 100 900 911", "role": "model"}], metadata={"temperature": 1.0, "max_output_tokens": 30}, @@ -608,8 +608,8 @@ async def test_gemini_completion_image_async( model_name="gemini-1.5-flash", model_provider="google", input_messages=[ - {"content": "[Non-text content object: {}]".format(repr(img)), "role": "user"}, - {"content": "Return a bounding box for the apple. \n [ymin, xmin, ymax, xmax]", "role": "user"}, + {"content": "[Non-text content object: {}]".format(repr(img))}, + {"content": "Return a bounding box for the apple. \n [ymin, xmin, ymax, xmax]"}, ], output_messages=[{"content": "57 100 900 911", "role": "model"}], metadata={"temperature": 1.0, "max_output_tokens": 30}, diff --git a/tests/llmobs/_utils.py b/tests/llmobs/_utils.py index c3f1853ac10..47d8891950e 100644 --- a/tests/llmobs/_utils.py +++ b/tests/llmobs/_utils.py @@ -197,12 +197,12 @@ def _llmobs_base_span_event( "span_id": str(span.span_id), "parent_id": _get_llmobs_parent_id(span), "name": span_name, - "tags": _expected_llmobs_tags(span, tags=tags, error=error, session_id=session_id), "start_ns": span.start_ns, "duration": span.duration_ns, "status": "error" if error else "ok", "meta": {"span.kind": span_kind}, "metrics": {}, + "tags": _expected_llmobs_tags(span, tags=tags, error=error, session_id=session_id), } if session_id: span_event["session_id"] = session_id From ca44592f3ef6f2b11bd86fefcd2ad3d0e10135e1 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Wed, 11 Sep 2024 14:45:40 -0400 Subject: [PATCH 29/32] avoid silent error --- ddtrace/contrib/internal/google_generativeai/_utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py index e671ceb7f6e..354ef47e69c 100644 --- a/ddtrace/contrib/internal/google_generativeai/_utils.py +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -160,11 +160,10 @@ def tag_request(span, integration, instance, args, kwargs): system_instruction = getattr(instance, "_system_instruction", None) stream = kwargs.get("stream", None) - generation_config_dict = None try: generation_config_dict = to_generation_config_dict(generation_config) except TypeError: - pass + generation_config_dict = None if generation_config_dict is not None: for k, v in generation_config_dict.items(): span.set_tag_str("google_generativeai.request.generation_config.%s" % k, str(v)) From 7c57b590bf71a6b63aad5d2a2256782480f3739b Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Wed, 11 Sep 2024 14:46:52 -0400 Subject: [PATCH 30/32] fmt --- ddtrace/contrib/google_generativeai/__init__.py | 3 +-- ddtrace/contrib/internal/google_generativeai/_utils.py | 3 +-- ddtrace/llmobs/_integrations/gemini.py | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/ddtrace/contrib/google_generativeai/__init__.py b/ddtrace/contrib/google_generativeai/__init__.py index f838fc346d9..6255b46d086 100644 --- a/ddtrace/contrib/google_generativeai/__init__.py +++ b/ddtrace/contrib/google_generativeai/__init__.py @@ -76,8 +76,7 @@ from 
ddtrace import Pin, config Pin.override(genai, service="my-gemini-service") -""" - +""" # noqa: E501 from ...internal.utils.importlib import require_modules diff --git a/ddtrace/contrib/internal/google_generativeai/_utils.py b/ddtrace/contrib/internal/google_generativeai/_utils.py index 354ef47e69c..44fd1db7729 100644 --- a/ddtrace/contrib/internal/google_generativeai/_utils.py +++ b/ddtrace/contrib/internal/google_generativeai/_utils.py @@ -1,8 +1,7 @@ import sys -import wrapt -from google.generativeai.types import GenerationConfigType from google.generativeai.types.generation_types import to_generation_config_dict +import wrapt from ddtrace.internal.utils import get_argument_value from ddtrace.llmobs._utils import _get_attr diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index 03a8ef8e03a..b91d152bcf0 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -2,8 +2,8 @@ from typing import Dict from typing import Optional -from ddtrace.llmobs._integrations.base import BaseLLMIntegration from ddtrace import Span +from ddtrace.llmobs._integrations.base import BaseLLMIntegration class GeminiIntegration(BaseLLMIntegration): From eecda470782e6bd5373b1b0c6027d1fcc6894510 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Wed, 11 Sep 2024 15:26:06 -0400 Subject: [PATCH 31/32] typing --- ddtrace/llmobs/_integrations/gemini.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index b91d152bcf0..34f486cd0e8 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -12,7 +12,7 @@ class GeminiIntegration(BaseLLMIntegration): def _set_base_span_tags( self, span: Span, provider: Optional[str] = None, model: Optional[str] = None, **kwargs: Dict[str, Any] ) -> None: - if provider: - span.set_tag_str("google_generativeai.request.model", model) - if model: - span.set_tag_str("google_generativeai.request.provider", provider) + if provider is not None: + span.set_tag_str("google_generativeai.request.provider", str(provider)) + if model is not None: + span.set_tag_str("google_generativeai.request.model", str(model)) From d6379e64ac4e7e76bbd2ec42ce91cee9e8acb042 Mon Sep 17 00:00:00 2001 From: Yun Kim Date: Wed, 11 Sep 2024 15:29:03 -0400 Subject: [PATCH 32/32] merge conflict --- ddtrace/llmobs/_integrations/gemini.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index 474c5ba9100..e2c9cb63bd3 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -18,7 +18,7 @@ from ddtrace.llmobs._constants import SPAN_KIND from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations.base import BaseLLMIntegration -from ddtrace.llmobs._utils import _getattr +from ddtrace.llmobs._utils import _get_attr from ddtrace.llmobs._utils import _unserializable_default_repr
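
The ``_get_attr`` helper imported in the final hunk is the dict-or-object accessor that an earlier commit in this series moves into ``ddtrace/llmobs/_utils.py``. A minimal usage sketch, assuming a candidate shaped like the mocked Gemini responses above (the ``candidate`` value here is illustrative, not taken from the test fixtures)::

    from ddtrace.llmobs._utils import _get_attr

    # Gemini response candidates may arrive as plain dicts or as attribute-style
    # objects; _get_attr(obj, name, default) handles both access patterns.
    candidate = {"content": {"role": "model", "parts": [{"text": "57 100 900 911"}]}}

    content = _get_attr(candidate, "content", {})
    role = _get_attr(content, "role", "")      # -> "model"
    parts = _get_attr(content, "parts", [])
    text = _get_attr(parts[0], "text", "") if parts else ""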