diff --git a/src/betterproto/__init__.py b/src/betterproto/__init__.py
new file mode 100644
index 0000000..ef4aab1
--- /dev/null
+++ b/src/betterproto/__init__.py
@@ -0,0 +1,542 @@
+from abc import ABC
+import dataclasses
+import datetime
+import enum
+from io import BytesIO
+import struct
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    ClassVar,
+    Dict,
+    Iterable,
+    Optional,
+    Set,
+    Tuple,
+    Type,
+)
+
+if TYPE_CHECKING:
+    from _typeshed import (
+        SupportsRead,
+        SupportsWrite,
+    )
+
+# Proto 3 data types
+TYPE_ENUM = "enum"
+TYPE_BOOL = "bool"
+TYPE_INT32 = "int32"
+TYPE_INT64 = "int64"
+TYPE_UINT32 = "uint32"
+TYPE_UINT64 = "uint64"
+TYPE_SINT32 = "sint32"
+TYPE_SINT64 = "sint64"
+TYPE_FLOAT = "float"
+TYPE_DOUBLE = "double"
+TYPE_FIXED32 = "fixed32"
+TYPE_SFIXED32 = "sfixed32"
+TYPE_FIXED64 = "fixed64"
+TYPE_SFIXED64 = "sfixed64"
+TYPE_STRING = "string"
+TYPE_BYTES = "bytes"
+TYPE_MESSAGE = "message"
+TYPE_MAP = "map"
+
+# Fields that use a fixed amount of space (4 or 8 bytes)
+FIXED_TYPES = [
+    TYPE_FLOAT,
+    TYPE_DOUBLE,
+    TYPE_FIXED32,
+    TYPE_SFIXED32,
+    TYPE_FIXED64,
+    TYPE_SFIXED64,
+]
+
+# Fields that are numerical 64-bit types
+INT_64_TYPES = [TYPE_INT64, TYPE_UINT64, TYPE_SINT64, TYPE_FIXED64, TYPE_SFIXED64]
+
+# Fields that are efficiently packed when repeated
+PACKED_TYPES = [
+    TYPE_ENUM,
+    TYPE_BOOL,
+    TYPE_INT32,
+    TYPE_INT64,
+    TYPE_UINT32,
+    TYPE_UINT64,
+    TYPE_SINT32,
+    TYPE_SINT64,
+    TYPE_FLOAT,
+    TYPE_DOUBLE,
+    TYPE_FIXED32,
+    TYPE_SFIXED32,
+    TYPE_FIXED64,
+    TYPE_SFIXED64,
+]
+
+# Wire types
+# https://developers.google.com/protocol-buffers/docs/encoding#structure
+WIRE_VARINT = 0
+WIRE_FIXED_64 = 1
+WIRE_LEN_DELIM = 2
+WIRE_FIXED_32 = 5
+
+# Mappings of which Proto 3 types correspond to which wire types.
+WIRE_VARINT_TYPES = [
+    TYPE_ENUM,
+    TYPE_BOOL,
+    TYPE_INT32,
+    TYPE_INT64,
+    TYPE_UINT32,
+    TYPE_UINT64,
+    TYPE_SINT32,
+    TYPE_SINT64,
+]
+WIRE_FIXED_32_TYPES = [
+    TYPE_FLOAT,
+    TYPE_FIXED32,
+    TYPE_SFIXED32,
+]
+WIRE_FIXED_64_TYPES = [
+    TYPE_DOUBLE,
+    TYPE_FIXED64,
+    TYPE_SFIXED64,
+]
+WIRE_LEN_DELIM_TYPES = [
+    TYPE_STRING,
+    TYPE_BYTES,
+    TYPE_MESSAGE,
+    TYPE_MAP,
+]
+
+SIZE_DELIMITED = -1
+
+# Sentinel distinguishing "field never assigned" from "field set to its zero value".
+PLACEHOLDER: Any = object()
+
+@dataclasses.dataclass(frozen=True)
+class FieldMetadata:
+    """Stores internal metadata used for parsing & serialization."""
+
+    number: int
+    proto_type: str
+    map_types: Optional[Tuple[str, str]] = None
+    group: Optional[str] = None
+    wraps: Optional[str] = None
+    optional: Optional[bool] = False
+
+    @staticmethod
+    def get(field: dataclasses.Field) -> "FieldMetadata":
+        """Returns the field metadata stored on a dataclass field."""
+        return field.metadata["betterproto"]
+
+def dataclass_field(
+    number: int,
+    proto_type: str,
+    *,
+    map_types: Optional[Tuple[str, str]] = None,
+    group: Optional[str] = None,
+    wraps: Optional[str] = None,
+    optional: Optional[bool] = False,
+) -> Any:
+    """Creates a dataclass field with attached protobuf metadata."""
+    return dataclasses.field(
+        default=None if optional else PLACEHOLDER,
+        metadata={
+            "betterproto": FieldMetadata(
+                number,
+                proto_type,
+                map_types=map_types,
+                group=group,
+                wraps=wraps,
+                optional=optional,
+            )
+        },
+    )
+
+def enum_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_ENUM, group=group, optional=optional)
+
+def bool_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_BOOL, group=group, optional=optional)
+
+def int32_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_INT32, group=group, optional=optional)
+
+def int64_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_INT64, group=group, optional=optional)
+
+def uint32_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_UINT32, group=group, optional=optional)
+
+def uint64_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_UINT64, group=group, optional=optional)
+
+def sint32_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_SINT32, group=group, optional=optional)
+
+def sint64_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_SINT64, group=group, optional=optional)
+
+def float_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_FLOAT, group=group, optional=optional)
+
+def double_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_DOUBLE, group=group, optional=optional)
+
+def fixed32_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_FIXED32, group=group, optional=optional)
+
+def sfixed32_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_SFIXED32, group=group, optional=optional)
+
+def fixed64_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_FIXED64, group=group, optional=optional)
+
+def sfixed64_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_SFIXED64, group=group, optional=optional)
+
+def string_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_STRING, group=group, optional=optional)
+
+def bytes_field(number: int, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_BYTES, group=group, optional=optional)
+
+def message_field(number: int, group: Optional[str] = None, wraps: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_MESSAGE, group=group, wraps=wraps, optional=optional)
+
+def map_field(number: int, key_type: str, value_type: str, group: Optional[str] = None, optional: bool = False) -> Any:
+    return dataclass_field(number, TYPE_MAP, map_types=(key_type, value_type), group=group, optional=optional)
+
+def _pack_fmt(proto_type: str) -> str:
+    """Returns a little-endian format string for reading/writing binary."""
+    return {
+        TYPE_DOUBLE: "<d",
+        TYPE_FLOAT: "<f",
+        TYPE_FIXED32: "<I",
+        TYPE_SFIXED32: "<i",
+        TYPE_FIXED64: "<Q",
+        TYPE_SFIXED64: "<q",
+    }[proto_type]
+
+def dump_varint(value: int, stream: "SupportsWrite[bytes]") -> None:
+    """Encodes a single varint and dumps it into the provided stream."""
+    if value < -(1 << 63):
+        raise ValueError(
+            "Negative value is not representable as a 64-bit integer - unable to encode a varint within 10 bytes."
+ ) + elif value < 0: + value += 1 << 64 + + bits = value & 0x7F + value >>= 7 + while value: + stream.write((0x80 | bits).to_bytes(1, "little")) + bits = value & 0x7F + value >>= 7 + stream.write(bits.to_bytes(1, "little")) + +def encode_varint(value: int) -> bytes: + """Encodes a single varint value for serialization.""" + with BytesIO() as stream: + dump_varint(value, stream) + return stream.getvalue() + +def _preprocess_single(proto_type: str, wraps: str, value: Any) -> bytes: + """Adjusts values before serialization.""" + if proto_type in ( + TYPE_ENUM, + TYPE_BOOL, + TYPE_INT32, + TYPE_INT64, + TYPE_UINT32, + TYPE_UINT64, + ): + return encode_varint(value) + elif proto_type in (TYPE_SINT32, TYPE_SINT64): + # Handle zig-zag encoding. + return encode_varint(value << 1 if value >= 0 else (value << 1) ^ (~0)) + elif proto_type in FIXED_TYPES: + return struct.pack(_pack_fmt(proto_type), value) + elif proto_type == TYPE_STRING: + return value.encode("utf-8") + elif proto_type == TYPE_MESSAGE: + # if isinstance(value, datetime): + # # Convert the `datetime` to a timestamp message. + # value = _Timestamp.from_datetime(value) + # elif isinstance(value, datetime.timedelta): + # # Convert the `timedelta` to a duration message. + # value = _Duration.from_timedelta(value) + # elif wraps: + # if value is None: + # return b"" + # value = _get_wrapper(wraps)(value=value) + + return bytes(value) + + return value + +def _serialize_single( + field_number: int, + proto_type: str, + value: Any, + *, + serialize_empty: bool = False, + wraps: str = "", +) -> bytes: + """Serializes a single field and value.""" + value = _preprocess_single(proto_type, wraps, value) + + output = bytearray() + if proto_type in WIRE_VARINT_TYPES: + key = encode_varint(field_number << 3) + output += key + value + elif proto_type in WIRE_FIXED_32_TYPES: + key = encode_varint((field_number << 3) | 5) + output += key + value + elif proto_type in WIRE_FIXED_64_TYPES: + key = encode_varint((field_number << 3) | 1) + output += key + value + elif proto_type in WIRE_LEN_DELIM_TYPES: + if len(value) or serialize_empty or wraps: + key = encode_varint((field_number << 3) | 2) + output += key + encode_varint(len(value)) + value + else: + raise NotImplementedError(proto_type) + + return bytes(output) + +class ProtoClassMetadata: + __slots__ = ( + "oneof_group_by_field", + "oneof_field_by_group", + "default_gen", + "cls_by_field", + "field_name_by_number", + "meta_by_field_name", + "sorted_field_names", + ) + + oneof_group_by_field: Dict[str, str] + oneof_field_by_group: Dict[str, Set[dataclasses.Field]] + field_name_by_number: Dict[int, str] + meta_by_field_name: Dict[str, FieldMetadata] + sorted_field_names: Tuple[str, ...] + default_gen: Dict[str, Callable[[], Any]] + cls_by_field: Dict[str, Type] + + def __init__(self, cls: Type['Message']): + by_field = {} + by_group: Dict[str, Set] = {} + by_field_name = {} + by_field_number = {} + + fields = dataclasses.fields(cls) + for field in fields: + meta = FieldMetadata.get(field) + + if meta.group: + # This is part of a one-of group. 
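+                # Only one member of a oneof group can be set at a time, so we
+                # record each member's group here; serialization later uses it
+                # to emit the selected member even when that member holds its
+                # default (zero) value.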
+                by_field[field.name] = meta.group
+
+                by_group.setdefault(meta.group, set()).add(field)
+
+            by_field_name[field.name] = meta
+            by_field_number[meta.number] = field.name
+
+        self.oneof_group_by_field = by_field
+        self.oneof_field_by_group = by_group
+        self.field_name_by_number = by_field_number
+        self.meta_by_field_name = by_field_name
+        self.sorted_field_names = tuple(
+            by_field_number[number] for number in sorted(by_field_number)
+        )
+        self.default_gen = self._get_default_gen(cls, fields)
+        self.cls_by_field = self._get_cls_by_field(cls, fields)
+
+    @staticmethod
+    def _get_default_gen(
+        cls: Type['Message'], fields: Iterable[dataclasses.Field]
+    ) -> Dict[str, Callable[[], Any]]:
+        return {field.name: cls._get_field_default_gen(field) for field in fields}
+
+    @staticmethod
+    def _get_cls_by_field(
+        cls: Type['Message'], fields: Iterable[dataclasses.Field]
+    ) -> Dict[str, Type]:
+        field_cls = {}
+
+        for field in fields:
+            meta = FieldMetadata.get(field)
+            if meta.proto_type == TYPE_MAP:
+                assert meta.map_types
+                kt = cls._cls_for(field, index=0)
+                vt = cls._cls_for(field, index=1)
+                # Each map entry is serialized as a nested message with the
+                # key in field 1 and the value in field 2.
+                field_cls[field.name] = dataclasses.make_dataclass(
+                    "Entry",
+                    [
+                        ("key", kt, dataclass_field(1, meta.map_types[0])),
+                        ("value", vt, dataclass_field(2, meta.map_types[1])),
+                    ],
+                    bases=(Message,),
+                )
+                field_cls[f"{field.name}.value"] = vt
+            else:
+                field_cls[field.name] = cls._cls_for(field)
+
+        return field_cls
+
+class Message(ABC):
+    """
+    The base class for protobuf messages; all generated messages inherit from
+    it. This class registers the message fields which are used by the serializers and
+    parsers to go between the Python, binary and JSON representations of the message.
+    """
+
+    _serialized_on_wire: bool
+    _unknown_fields: bytes
+    _group_current: Dict[str, str]
+    _meta: ClassVar[ProtoClassMetadata]
+
+    def __raw_get(self, name: str) -> Any:
+        # Bypass attribute hooks so sentinel (PLACEHOLDER) values can be read.
+        return super().__getattribute__(name)
+
+    def __post_init__(self) -> None:
+        # Keep track of whether every field was default
+        all_sentinel = True
+
+        # Set current field of each group after `__init__` has already been run.
+        group_current: Dict[str, Optional[str]] = {}
+        for field_name, meta in self._meta.meta_by_field_name.items():
+            if meta.group:
+                group_current.setdefault(meta.group)
+
+            value = self.__raw_get(field_name)
+            if value is not PLACEHOLDER and not (meta.optional and value is None):
+                # Found a non-sentinel value
+                all_sentinel = False
+
+                if meta.group:
+                    # This was set, so make it the selected value of the one-of.
+                    group_current[meta.group] = field_name
+
+        # Now that all the defaults are set, reset it!
+        self.__dict__["_serialized_on_wire"] = not all_sentinel
+        self.__dict__["_unknown_fields"] = b""
+        self.__dict__["_group_current"] = group_current
+
+    def _get_field_default(self, field_name: str) -> Any:
+        # Build the zero value for a field from its cached default factory.
+        return self._meta.default_gen[field_name]()
+
+    def _include_default_value_for_oneof(
+        self, field_name: str, meta: FieldMetadata
+    ) -> bool:
+        return (
+            meta.group is not None
+            and self._group_current.get(meta.group) == field_name
+        )
+
+    def dump(self, stream: 'SupportsWrite[bytes]', delimit: bool = False) -> None:
+        """
+        Dumps the binary encoded Protobuf message to the stream.
+
+        Parameters
+        ----------
+        stream: :class:`BinaryIO`
+            The stream to dump the message to.
+        delimit:
+            Whether to prefix the message with a varint declaring its size.
+        """
+        with BytesIO() as buffer:
+            for field_name, meta in self._meta.meta_by_field_name.items():
+                try:
+                    value = getattr(self, field_name)
+                except AttributeError:
+                    continue
+
+                if value is None:
+                    # Optional items should be skipped. This is used for the Google
+                    # wrapper types and proto3 field presence/optional fields.
+                    continue
+
+                # Being selected in a group means this field is the one that is
+                # currently set in a `oneof` group, so it must be serialized even
+                # if the value is the default zero value.
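+                # (Otherwise a receiver could not tell that this oneof member
+                # was the one chosen when it happens to equal its zero value.)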
+                #
+                # Note that proto3 field presence/optional fields are put in a
+                # synthetic single-item oneof by protoc, which helps us ensure we
+                # send the value even if the value is the default zero value.
+                selected_in_group = bool(meta.group) or meta.optional
+
+                # Empty messages can still be sent on the wire if they were
+                # set (or received empty).
+                serialize_empty = isinstance(value, Message) and value._serialized_on_wire
+
+                include_default_value_for_oneof = self._include_default_value_for_oneof(
+                    field_name=field_name, meta=meta
+                )
+
+                if value == self._get_field_default(field_name) and not (
+                    selected_in_group or serialize_empty or include_default_value_for_oneof
+                ):
+                    # Default (zero) values are not serialized. Two exceptions are
+                    # if this is the selected oneof item or if we know we have to
+                    # serialize an empty message (i.e. zero value was explicitly
+                    # set by the user).
+                    continue
+
+                if isinstance(value, list):
+                    if meta.proto_type in PACKED_TYPES:
+                        # Packed lists look like a length-delimited field. First,
+                        # preprocess/encode each value into a buffer and then
+                        # treat it like a field of raw bytes.
+                        buf = bytearray()
+                        for item in value:
+                            buf += _preprocess_single(meta.proto_type, "", item)
+                        buffer.write(_serialize_single(meta.number, TYPE_BYTES, buf))
+                    else:
+                        for item in value:
+                            buffer.write(
+                                _serialize_single(
+                                    meta.number,
+                                    meta.proto_type,
+                                    item,
+                                    wraps=meta.wraps or "",
+                                    serialize_empty=True,
+                                )
+                                # if it's an empty message it still needs to be represented
+                                # as an item in the repeated list
+                                or b"\n\x00"
+                            )
+
+                elif isinstance(value, dict):
+                    for k, v in value.items():
+                        assert meta.map_types
+                        sk = _serialize_single(1, meta.map_types[0], k)
+                        sv = _serialize_single(2, meta.map_types[1], v)
+                        buffer.write(
+                            _serialize_single(meta.number, meta.proto_type, sk + sv)
+                        )
+                else:
+                    # If we have an empty string and we're including the default value for
+                    # a oneof, make sure we serialize it. This ensures that the byte string
+                    # output isn't simply an empty string. This also ensures that round trip
+                    # serialization will keep `which_one_of` calls consistent.
+                    if (
+                        isinstance(value, str)
+                        and value == ""
+                        and include_default_value_for_oneof
+                    ):
+                        serialize_empty = True
+
+                    buffer.write(
+                        _serialize_single(
+                            meta.number,
+                            meta.proto_type,
+                            value,
+                            serialize_empty=serialize_empty or bool(selected_in_group),
+                            wraps=meta.wraps or "",
+                        )
+                    )
+
+            if delimit:
+                dump_varint(len(buffer.getvalue()), stream)
+            stream.write(buffer.getvalue())
+            stream.write(self._unknown_fields)
+
+    def __bytes__(self) -> bytes:
+        """
+        Get the binary encoded Protobuf representation of this message instance.
+        """
+        with BytesIO() as stream:
+            self.dump(stream)
+            return stream.getvalue()
+
+    # For compatibility with other libraries
+    def SerializeToString(self) -> bytes:
+        """
+        Get the binary encoded Protobuf representation of this message instance.
+
+        .. note::
+            This is a method for compatibility with other libraries,
+            you should really use ``bytes(x)``.
+
+        Returns
+        -------
+        :class:`bytes`
+            The binary encoded Protobuf representation of this message instance
+        """
+        return bytes(self)
+
+# Base class for generated protobuf enums; plain IntEnum semantics are all the
+# serializer needs (enum members are encoded as varints of their value).
+class Enum(enum.IntEnum):
+    pass
\ No newline at end of file
diff --git a/src/grpclib/__init__.py b/src/grpclib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/opentelemetry/exporter/otlp/proto/common/__init__.py b/src/opentelemetry/exporter/otlp/proto/common/__init__.py
new file mode 100644
index 0000000..2d336ae
--- /dev/null
+++ b/src/opentelemetry/exporter/otlp/proto/common/__init__.py
@@ -0,0 +1,18 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from opentelemetry.exporter.otlp.proto.common.version import __version__
+
+__all__ = ["__version__"]
diff --git a/src/opentelemetry/exporter/otlp/proto/common/_internal/__init__.py b/src/opentelemetry/exporter/otlp/proto/common/_internal/__init__.py
new file mode 100644
index 0000000..65f9477
--- /dev/null
+++ b/src/opentelemetry/exporter/otlp/proto/common/_internal/__init__.py
@@ -0,0 +1,178 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
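+
+# Shared helpers for converting SDK resources, instrumentation scopes,
+# attribute values, and span/trace ids into their protobuf counterparts.
+# The log, metric, and trace encoders in this package all build on these.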
+ + +import logging +from collections.abc import Sequence +from itertools import count +from typing import ( + Any, + Mapping, + Optional, + List, + Callable, + TypeVar, + Dict, + Iterator, +) + +from opentelemetry.sdk.util.instrumentation import InstrumentationScope +from opentelemetry.proto.common.v1.common_pb2 import ( + InstrumentationScope as PB2InstrumentationScope, +) +from opentelemetry.proto.resource.v1.resource_pb2 import ( + Resource as PB2Resource, +) +from opentelemetry.proto.common.v1.common_pb2 import AnyValue as PB2AnyValue +from opentelemetry.proto.common.v1.common_pb2 import KeyValue as PB2KeyValue +from opentelemetry.proto.common.v1.common_pb2 import ( + KeyValueList as PB2KeyValueList, +) +from opentelemetry.proto.common.v1.common_pb2 import ( + ArrayValue as PB2ArrayValue, +) +from opentelemetry.sdk.trace import Resource +from opentelemetry.util.types import Attributes + +_logger = logging.getLogger(__name__) + +_TypingResourceT = TypeVar("_TypingResourceT") +_ResourceDataT = TypeVar("_ResourceDataT") + + +def _encode_instrumentation_scope( + instrumentation_scope: InstrumentationScope, +) -> PB2InstrumentationScope: + if instrumentation_scope is None: + return PB2InstrumentationScope() + return PB2InstrumentationScope( + name=instrumentation_scope.name, + version=instrumentation_scope.version, + ) + + +def _encode_resource(resource: Resource) -> PB2Resource: + return PB2Resource(attributes=_encode_attributes(resource.attributes)) + + +def _encode_value(value: Any) -> PB2AnyValue: + if isinstance(value, bool): + return PB2AnyValue(bool_value=value) + if isinstance(value, str): + return PB2AnyValue(string_value=value) + if isinstance(value, int): + return PB2AnyValue(int_value=value) + if isinstance(value, float): + return PB2AnyValue(double_value=value) + if isinstance(value, bytes): + return PB2AnyValue(bytes_value=value) + if isinstance(value, Sequence): + return PB2AnyValue( + array_value=PB2ArrayValue(values=[_encode_value(v) for v in value]) + ) + elif isinstance(value, Mapping): + return PB2AnyValue( + kvlist_value=PB2KeyValueList( + values=[_encode_key_value(str(k), v) for k, v in value.items()] + ) + ) + raise Exception(f"Invalid type {type(value)} of value {value}") + + +def _encode_key_value(key: str, value: Any) -> PB2KeyValue: + return PB2KeyValue(key=key, value=_encode_value(value)) + + +def _encode_span_id(span_id: int) -> bytes: + return span_id.to_bytes(length=8, byteorder="big", signed=False) + + +def _encode_trace_id(trace_id: int) -> bytes: + return trace_id.to_bytes(length=16, byteorder="big", signed=False) + + +def _encode_attributes( + attributes: Attributes, +) -> Optional[List[PB2KeyValue]]: + if attributes: + pb2_attributes = [] + for key, value in attributes.items(): + # pylint: disable=broad-exception-caught + try: + pb2_attributes.append(_encode_key_value(key, value)) + except Exception as error: + _logger.exception("Failed to encode key %s: %s", key, error) + else: + pb2_attributes = None + return pb2_attributes + + +def _get_resource_data( + sdk_resource_scope_data: Dict[Resource, _ResourceDataT], + resource_class: Callable[..., _TypingResourceT], + name: str, +) -> List[_TypingResourceT]: + resource_data = [] + + for ( + sdk_resource, + scope_data, + ) in sdk_resource_scope_data.items(): + collector_resource = PB2Resource( + attributes=_encode_attributes(sdk_resource.attributes) + ) + resource_data.append( + resource_class( + **{ + "resource": collector_resource, + "scope_{}".format(name): scope_data.values(), + } + ) + ) + return 
resource_data + + +def _create_exp_backoff_generator(max_value: int = 0) -> Iterator[int]: + """ + Generates an infinite sequence of exponential backoff values. The sequence starts + from 1 (2^0) and doubles each time (2^1, 2^2, 2^3, ...). If a max_value is specified + and non-zero, the generated values will not exceed this maximum, capping at max_value + instead of growing indefinitely. + + Parameters: + - max_value (int, optional): The maximum value to yield. If 0 or not provided, the + sequence grows without bound. + + Returns: + Iterator[int]: An iterator that yields the exponential backoff values, either uncapped or + capped at max_value. + + Example: + ``` + gen = _create_exp_backoff_generator(max_value=10) + for _ in range(5): + print(next(gen)) + ``` + This will print: + 1 + 2 + 4 + 8 + 10 + + Note: this functionality used to be handled by the 'backoff' package. + """ + for i in count(0): + out = 2**i + yield min(out, max_value) if max_value else out diff --git a/src/opentelemetry/exporter/otlp/proto/common/_internal/_log_encoder/__init__.py b/src/opentelemetry/exporter/otlp/proto/common/_internal/_log_encoder/__init__.py new file mode 100644 index 0000000..47ff0cf --- /dev/null +++ b/src/opentelemetry/exporter/otlp/proto/common/_internal/_log_encoder/__init__.py @@ -0,0 +1,95 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
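+
+# Encodes a batch of SDK LogData into an OTLP ExportLogsServiceRequest,
+# grouping log records first by resource and then by instrumentation scope.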
+from collections import defaultdict +from typing import Sequence, List + +from opentelemetry.exporter.otlp.proto.common._internal import ( + _encode_instrumentation_scope, + _encode_resource, + _encode_span_id, + _encode_trace_id, + _encode_value, + _encode_attributes, +) +from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import ( + ExportLogsServiceRequest, +) +from opentelemetry.proto.logs.v1.logs_pb2 import ( + ScopeLogs, + ResourceLogs, +) +from opentelemetry.proto.logs.v1.logs_pb2 import LogRecord as PB2LogRecord + +from opentelemetry.sdk._logs import LogData + + +def encode_logs(batch: Sequence[LogData]) -> ExportLogsServiceRequest: + return ExportLogsServiceRequest(resource_logs=_encode_resource_logs(batch)) + + +def _encode_log(log_data: LogData) -> PB2LogRecord: + span_id = ( + None + if log_data.log_record.span_id == 0 + else _encode_span_id(log_data.log_record.span_id) + ) + trace_id = ( + None + if log_data.log_record.trace_id == 0 + else _encode_trace_id(log_data.log_record.trace_id) + ) + return PB2LogRecord( + time_unix_nano=log_data.log_record.timestamp, + observed_time_unix_nano=log_data.log_record.observed_timestamp, + span_id=span_id, + trace_id=trace_id, + flags=int(log_data.log_record.trace_flags), + body=_encode_value(log_data.log_record.body), + severity_text=log_data.log_record.severity_text, + attributes=_encode_attributes(log_data.log_record.attributes), + dropped_attributes_count=log_data.log_record.dropped_attributes, + severity_number=log_data.log_record.severity_number.value, + ) + + +def _encode_resource_logs(batch: Sequence[LogData]) -> List[ResourceLogs]: + sdk_resource_logs = defaultdict(lambda: defaultdict(list)) + + for sdk_log in batch: + sdk_resource = sdk_log.log_record.resource + sdk_instrumentation = sdk_log.instrumentation_scope or None + pb2_log = _encode_log(sdk_log) + + sdk_resource_logs[sdk_resource][sdk_instrumentation].append(pb2_log) + + pb2_resource_logs = [] + + for sdk_resource, sdk_instrumentations in sdk_resource_logs.items(): + scope_logs = [] + for sdk_instrumentation, pb2_logs in sdk_instrumentations.items(): + scope_logs.append( + ScopeLogs( + scope=(_encode_instrumentation_scope(sdk_instrumentation)), + log_records=pb2_logs, + ) + ) + pb2_resource_logs.append( + ResourceLogs( + resource=_encode_resource(sdk_resource), + scope_logs=scope_logs, + schema_url=sdk_resource.schema_url, + ) + ) + + return pb2_resource_logs diff --git a/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py b/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py new file mode 100644 index 0000000..0d66fd2 --- /dev/null +++ b/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py @@ -0,0 +1,338 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
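+
+# Encodes SDK MetricsData into an OTLP ExportMetricsServiceRequest and
+# resolves the exporter's temporality/aggregation preferences, e.g. setting
+# OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE=DELTA selects delta
+# temporality for counters and histograms.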
+import logging
+
+from opentelemetry.sdk.metrics.export import (
+    MetricExporter,
+)
+from opentelemetry.sdk.metrics.view import Aggregation
+from os import environ
+from opentelemetry.sdk.metrics import (
+    Counter,
+    Histogram,
+    ObservableCounter,
+    ObservableGauge,
+    ObservableUpDownCounter,
+    UpDownCounter,
+)
+from opentelemetry.exporter.otlp.proto.common._internal import (
+    _encode_attributes,
+)
+from opentelemetry.sdk.environment_variables import (
+    OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE,
+)
+from opentelemetry.sdk.metrics.export import (
+    AggregationTemporality,
+)
+from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (
+    ExportMetricsServiceRequest,
+)
+from opentelemetry.proto.common.v1.common_pb2 import InstrumentationScope
+from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2
+from opentelemetry.sdk.metrics.export import (
+    MetricsData,
+    Gauge,
+    Histogram as HistogramType,
+    Sum,
+    ExponentialHistogram as ExponentialHistogramType,
+)
+from typing import Dict, Optional
+from opentelemetry.proto.resource.v1.resource_pb2 import (
+    Resource as PB2Resource,
+)
+from opentelemetry.sdk.environment_variables import (
+    OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION,
+)
+from opentelemetry.sdk.metrics.view import (
+    ExponentialBucketHistogramAggregation,
+    ExplicitBucketHistogramAggregation,
+)
+
+_logger = logging.getLogger(__name__)
+
+
+class OTLPMetricExporterMixin:
+    def _common_configuration(
+        self,
+        preferred_temporality: Optional[Dict[type, AggregationTemporality]] = None,
+        preferred_aggregation: Optional[Dict[type, Aggregation]] = None,
+    ) -> None:
+
+        MetricExporter.__init__(
+            self,
+            preferred_temporality=self._get_temporality(preferred_temporality),
+            preferred_aggregation=self._get_aggregation(preferred_aggregation),
+        )
+
+    def _get_temporality(
+        self, preferred_temporality: Optional[Dict[type, AggregationTemporality]]
+    ) -> Dict[type, AggregationTemporality]:
+
+        otel_exporter_otlp_metrics_temporality_preference = (
+            environ.get(
+                OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE,
+                "CUMULATIVE",
+            )
+            .upper()
+            .strip()
+        )
+
+        if otel_exporter_otlp_metrics_temporality_preference == "DELTA":
+            instrument_class_temporality = {
+                Counter: AggregationTemporality.DELTA,
+                UpDownCounter: AggregationTemporality.CUMULATIVE,
+                Histogram: AggregationTemporality.DELTA,
+                ObservableCounter: AggregationTemporality.DELTA,
+                ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,
+                ObservableGauge: AggregationTemporality.CUMULATIVE,
+            }
+
+        elif otel_exporter_otlp_metrics_temporality_preference == "LOWMEMORY":
+            instrument_class_temporality = {
+                Counter: AggregationTemporality.DELTA,
+                UpDownCounter: AggregationTemporality.CUMULATIVE,
+                Histogram: AggregationTemporality.DELTA,
+                ObservableCounter: AggregationTemporality.CUMULATIVE,
+                ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,
+                ObservableGauge: AggregationTemporality.CUMULATIVE,
+            }
+
+        else:
+            if otel_exporter_otlp_metrics_temporality_preference != (
+                "CUMULATIVE"
+            ):
+                _logger.warning(
+                    "Unrecognized OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE"
+                    " value found: "
+                    f"{otel_exporter_otlp_metrics_temporality_preference}, "
+                    "using CUMULATIVE"
+                )
+            instrument_class_temporality = {
+                Counter: AggregationTemporality.CUMULATIVE,
+                UpDownCounter: AggregationTemporality.CUMULATIVE,
+                Histogram: AggregationTemporality.CUMULATIVE,
+                ObservableCounter: AggregationTemporality.CUMULATIVE,
+                ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,
+                ObservableGauge: AggregationTemporality.CUMULATIVE,
+            }
+
+        instrument_class_temporality.update(preferred_temporality or {})
+
+        return instrument_class_temporality
+
+    def _get_aggregation(
+        self,
+        preferred_aggregation: Optional[Dict[type, Aggregation]],
+    ) -> Dict[type, Aggregation]:
+
+        otel_exporter_otlp_metrics_default_histogram_aggregation = environ.get(
+            OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION,
+            "explicit_bucket_histogram",
+        )
+
+        if otel_exporter_otlp_metrics_default_histogram_aggregation == (
+            "base2_exponential_bucket_histogram"
+        ):
+
+            instrument_class_aggregation = {
+                Histogram: ExponentialBucketHistogramAggregation(),
+            }
+
+        else:
+
+            if otel_exporter_otlp_metrics_default_histogram_aggregation != (
+                "explicit_bucket_histogram"
+            ):
+
+                _logger.warning(
+                    (
+                        "Invalid value for %s: %s, using explicit bucket "
+                        "histogram aggregation"
+                    ),
+                    OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION,
+                    otel_exporter_otlp_metrics_default_histogram_aggregation,
+                )
+
+            instrument_class_aggregation = {
+                Histogram: ExplicitBucketHistogramAggregation(),
+            }
+
+        instrument_class_aggregation.update(preferred_aggregation or {})
+
+        return instrument_class_aggregation
+
+
+def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest:
+    resource_metrics_dict = {}
+
+    for resource_metrics in data.resource_metrics:
+
+        resource = resource_metrics.resource
+
+        # It is safe to assume that each entry in data.resource_metrics is
+        # associated with a unique resource.
+        scope_metrics_dict = {}
+
+        resource_metrics_dict[resource] = scope_metrics_dict
+
+        for scope_metrics in resource_metrics.scope_metrics:
+
+            instrumentation_scope = scope_metrics.scope
+
+            # The SDK groups metrics in instrumentation scopes already so
+            # there is no need to check for existing instrumentation scopes
+            # here.
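+            # Each InstrumentationScope maps to the single pb2.ScopeMetrics
+            # message that accumulates the encoded metrics for that scope.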
+ pb2_scope_metrics = pb2.ScopeMetrics( + scope=InstrumentationScope( + name=instrumentation_scope.name, + version=instrumentation_scope.version, + ) + ) + + scope_metrics_dict[instrumentation_scope] = pb2_scope_metrics + + for metric in scope_metrics.metrics: + pb2_metric = pb2.Metric( + name=metric.name, + description=metric.description, + unit=metric.unit, + ) + + if isinstance(metric.data, Gauge): + for data_point in metric.data.data_points: + pt = pb2.NumberDataPoint( + attributes=_encode_attributes( + data_point.attributes + ), + time_unix_nano=data_point.time_unix_nano, + ) + if isinstance(data_point.value, int): + pt.as_int = data_point.value + else: + pt.as_double = data_point.value + pb2_metric.gauge.data_points.append(pt) + + elif isinstance(metric.data, HistogramType): + for data_point in metric.data.data_points: + pt = pb2.HistogramDataPoint( + attributes=_encode_attributes( + data_point.attributes + ), + time_unix_nano=data_point.time_unix_nano, + start_time_unix_nano=( + data_point.start_time_unix_nano + ), + count=data_point.count, + sum=data_point.sum, + bucket_counts=data_point.bucket_counts, + explicit_bounds=data_point.explicit_bounds, + max=data_point.max, + min=data_point.min, + ) + pb2_metric.histogram.aggregation_temporality = ( + metric.data.aggregation_temporality + ) + pb2_metric.histogram.data_points.append(pt) + + elif isinstance(metric.data, Sum): + for data_point in metric.data.data_points: + pt = pb2.NumberDataPoint( + attributes=_encode_attributes( + data_point.attributes + ), + start_time_unix_nano=( + data_point.start_time_unix_nano + ), + time_unix_nano=data_point.time_unix_nano, + ) + if isinstance(data_point.value, int): + pt.as_int = data_point.value + else: + pt.as_double = data_point.value + # note that because sum is a message type, the + # fields must be set individually rather than + # instantiating a pb2.Sum and setting it once + pb2_metric.sum.aggregation_temporality = ( + metric.data.aggregation_temporality + ) + pb2_metric.sum.is_monotonic = metric.data.is_monotonic + pb2_metric.sum.data_points.append(pt) + + elif isinstance(metric.data, ExponentialHistogramType): + for data_point in metric.data.data_points: + + if data_point.positive.bucket_counts: + positive = pb2.ExponentialHistogramDataPoint.Buckets( + offset=data_point.positive.offset, + bucket_counts=data_point.positive.bucket_counts, + ) + else: + positive = None + + if data_point.negative.bucket_counts: + negative = pb2.ExponentialHistogramDataPoint.Buckets( + offset=data_point.negative.offset, + bucket_counts=data_point.negative.bucket_counts, + ) + else: + negative = None + + pt = pb2.ExponentialHistogramDataPoint( + attributes=_encode_attributes( + data_point.attributes + ), + time_unix_nano=data_point.time_unix_nano, + start_time_unix_nano=( + data_point.start_time_unix_nano + ), + count=data_point.count, + sum=data_point.sum, + scale=data_point.scale, + zero_count=data_point.zero_count, + positive=positive, + negative=negative, + flags=data_point.flags, + max=data_point.max, + min=data_point.min, + ) + pb2_metric.exponential_histogram.aggregation_temporality = ( + metric.data.aggregation_temporality + ) + pb2_metric.exponential_histogram.data_points.append(pt) + + else: + _logger.warning( + "unsupported data type %s", + metric.data.__class__.__name__, + ) + continue + + pb2_scope_metrics.metrics.append(pb2_metric) + + resource_data = [] + for ( + sdk_resource, + scope_data, + ) in resource_metrics_dict.items(): + resource_data.append( + pb2.ResourceMetrics( + 
resource=PB2Resource( + attributes=_encode_attributes(sdk_resource.attributes) + ), + scope_metrics=scope_data.values(), + schema_url=sdk_resource.schema_url, + ) + ) + resource_metrics = resource_data + return ExportMetricsServiceRequest(resource_metrics=resource_metrics) diff --git a/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py b/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py new file mode 100644 index 0000000..d382159 --- /dev/null +++ b/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py @@ -0,0 +1,189 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from collections import defaultdict +from typing import List, Optional, Sequence + +from opentelemetry.exporter.otlp.proto.common._internal import ( + _encode_attributes, + _encode_instrumentation_scope, + _encode_resource, + _encode_span_id, + _encode_trace_id, +) +from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ( + ExportTraceServiceRequest as PB2ExportTraceServiceRequest, +) +from opentelemetry.proto.trace.v1.trace_pb2 import ( + ResourceSpans as PB2ResourceSpans, +) +from opentelemetry.proto.trace.v1.trace_pb2 import ScopeSpans as PB2ScopeSpans +from opentelemetry.proto.trace.v1.trace_pb2 import Span as PB2SPan +from opentelemetry.proto.trace.v1.trace_pb2 import SpanFlags as PB2SpanFlags +from opentelemetry.proto.trace.v1.trace_pb2 import Status as PB2Status +from opentelemetry.sdk.trace import Event, ReadableSpan +from opentelemetry.trace import Link, SpanKind +from opentelemetry.trace.span import SpanContext, Status, TraceState + +# pylint: disable=E1101 +_SPAN_KIND_MAP = { + SpanKind.INTERNAL: PB2SPan.SpanKind.SPAN_KIND_INTERNAL, + SpanKind.SERVER: PB2SPan.SpanKind.SPAN_KIND_SERVER, + SpanKind.CLIENT: PB2SPan.SpanKind.SPAN_KIND_CLIENT, + SpanKind.PRODUCER: PB2SPan.SpanKind.SPAN_KIND_PRODUCER, + SpanKind.CONSUMER: PB2SPan.SpanKind.SPAN_KIND_CONSUMER, +} + +_logger = logging.getLogger(__name__) + + +def encode_spans( + sdk_spans: Sequence[ReadableSpan], +) -> PB2ExportTraceServiceRequest: + return PB2ExportTraceServiceRequest( + resource_spans=_encode_resource_spans(sdk_spans) + ) + + +def _encode_resource_spans( + sdk_spans: Sequence[ReadableSpan], +) -> List[PB2ResourceSpans]: + # We need to inspect the spans and group + structure them as: + # + # Resource + # Instrumentation Library + # Spans + # + # First loop organizes the SDK spans in this structure. Protobuf messages + # are not hashable so we stick with SDK data in this phase. + # + # Second loop encodes the data into Protobuf format. 
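+    # The nested defaultdict below realizes that grouping: SDK resource ->
+    # instrumentation scope -> list of encoded spans.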
+ # + sdk_resource_spans = defaultdict(lambda: defaultdict(list)) + + for sdk_span in sdk_spans: + sdk_resource = sdk_span.resource + sdk_instrumentation = sdk_span.instrumentation_scope or None + pb2_span = _encode_span(sdk_span) + + sdk_resource_spans[sdk_resource][sdk_instrumentation].append(pb2_span) + + pb2_resource_spans = [] + + for sdk_resource, sdk_instrumentations in sdk_resource_spans.items(): + scope_spans = [] + for sdk_instrumentation, pb2_spans in sdk_instrumentations.items(): + scope_spans.append( + PB2ScopeSpans( + scope=(_encode_instrumentation_scope(sdk_instrumentation)), + spans=pb2_spans, + ) + ) + pb2_resource_spans.append( + PB2ResourceSpans( + resource=_encode_resource(sdk_resource), + scope_spans=scope_spans, + schema_url=sdk_resource.schema_url, + ) + ) + + return pb2_resource_spans + + +def _span_flags(parent_span_context: Optional[SpanContext]) -> int: + flags = PB2SpanFlags.SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK + if parent_span_context and parent_span_context.is_remote: + flags |= PB2SpanFlags.SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK + return flags + + +def _encode_span(sdk_span: ReadableSpan) -> PB2SPan: + span_context = sdk_span.get_span_context() + return PB2SPan( + trace_id=_encode_trace_id(span_context.trace_id), + span_id=_encode_span_id(span_context.span_id), + trace_state=_encode_trace_state(span_context.trace_state), + parent_span_id=_encode_parent_id(sdk_span.parent), + name=sdk_span.name, + kind=_SPAN_KIND_MAP[sdk_span.kind], + start_time_unix_nano=sdk_span.start_time, + end_time_unix_nano=sdk_span.end_time, + attributes=_encode_attributes(sdk_span.attributes), + events=_encode_events(sdk_span.events), + links=_encode_links(sdk_span.links), + status=_encode_status(sdk_span.status), + dropped_attributes_count=sdk_span.dropped_attributes, + dropped_events_count=sdk_span.dropped_events, + dropped_links_count=sdk_span.dropped_links, + flags=_span_flags(sdk_span.parent), + ) + + +def _encode_events( + events: Sequence[Event], +) -> Optional[List[PB2SPan.Event]]: + pb2_events = None + if events: + pb2_events = [] + for event in events: + encoded_event = PB2SPan.Event( + name=event.name, + time_unix_nano=event.timestamp, + attributes=_encode_attributes(event.attributes), + dropped_attributes_count=event.dropped_attributes, + ) + pb2_events.append(encoded_event) + return pb2_events + + +def _encode_links(links: Sequence[Link]) -> Sequence[PB2SPan.Link]: + pb2_links = None + if links: + pb2_links = [] + for link in links: + encoded_link = PB2SPan.Link( + trace_id=_encode_trace_id(link.context.trace_id), + span_id=_encode_span_id(link.context.span_id), + attributes=_encode_attributes(link.attributes), + dropped_attributes_count=link.dropped_attributes, + flags=_span_flags(link.context), + ) + pb2_links.append(encoded_link) + return pb2_links + + +def _encode_status(status: Status) -> Optional[PB2Status]: + pb2_status = None + if status is not None: + pb2_status = PB2Status( + code=status.status_code.value, + message=status.description, + ) + return pb2_status + + +def _encode_trace_state(trace_state: TraceState) -> Optional[str]: + pb2_trace_state = None + if trace_state is not None: + pb2_trace_state = ",".join( + [f"{key}={value}" for key, value in (trace_state.items())] + ) + return pb2_trace_state + + +def _encode_parent_id(context: Optional[SpanContext]) -> Optional[bytes]: + if context: + return _encode_span_id(context.span_id) + return None diff --git a/src/opentelemetry/exporter/otlp/proto/common/_log_encoder.py 
b/src/opentelemetry/exporter/otlp/proto/common/_log_encoder.py new file mode 100644 index 0000000..f34ff82 --- /dev/null +++ b/src/opentelemetry/exporter/otlp/proto/common/_log_encoder.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from opentelemetry.exporter.otlp.proto.common._internal._log_encoder import ( + encode_logs, +) + +__all__ = ["encode_logs"] diff --git a/src/opentelemetry/exporter/otlp/proto/common/metrics_encoder.py b/src/opentelemetry/exporter/otlp/proto/common/metrics_encoder.py new file mode 100644 index 0000000..14f8fc3 --- /dev/null +++ b/src/opentelemetry/exporter/otlp/proto/common/metrics_encoder.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from opentelemetry.exporter.otlp.proto.common._internal.metrics_encoder import ( + encode_metrics, +) + +__all__ = ["encode_metrics"] diff --git a/src/opentelemetry/exporter/otlp/proto/common/py.typed b/src/opentelemetry/exporter/otlp/proto/common/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/opentelemetry/exporter/otlp/proto/common/trace_encoder.py b/src/opentelemetry/exporter/otlp/proto/common/trace_encoder.py new file mode 100644 index 0000000..2af5765 --- /dev/null +++ b/src/opentelemetry/exporter/otlp/proto/common/trace_encoder.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import (
+    encode_spans,
+)
+
+__all__ = ["encode_spans"]
diff --git a/src/opentelemetry/exporter/otlp/proto/common/version.py b/src/opentelemetry/exporter/otlp/proto/common/version.py
new file mode 100644
index 0000000..38affcb
--- /dev/null
+++ b/src/opentelemetry/exporter/otlp/proto/common/version.py
@@ -0,0 +1,15 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "1.28.0.dev"
diff --git a/src/opentelemetry/proto/__init__.py b/src/opentelemetry/proto/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/opentelemetry/proto/collector/__init__.py b/src/opentelemetry/proto/collector/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/opentelemetry/proto/collector/logs/v1/__init__.py b/src/opentelemetry/proto/collector/logs/v1/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/opentelemetry/proto/collector/logs/v1/logs_service_pb2.py b/src/opentelemetry/proto/collector/logs/v1/logs_service_pb2.py
new file mode 100644
index 0000000..72f52f4
--- /dev/null
+++ b/src/opentelemetry/proto/collector/logs/v1/logs_service_pb2.py
@@ -0,0 +1,77 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# sources: opentelemetry/proto/collector/logs/v1/logs_service.proto
+# plugin: python-betterproto
+from dataclasses import dataclass
+from typing import List
+
+import betterproto
+import grpclib
+
+from opentelemetry.proto.logs.v1 import logs_pb2 as v1
+
+
+@dataclass
+class ExportLogsServiceRequest(betterproto.Message):
+    # An array of ResourceLogs. For data coming from a single resource this array
+    # will typically contain one element. Intermediary nodes (such as
+    # OpenTelemetry Collector) that receive data from multiple origins typically
+    # batch the data before forwarding further and in that case this array will
+    # contain multiple elements.
+    resource_logs: List[v1.ResourceLogs] = betterproto.message_field(1)
+
+
+@dataclass
+class ExportLogsServiceResponse(betterproto.Message):
+    # The details of a partially successful export request. If the request is
+    # only partially accepted (i.e. when the server accepts only parts of the
+    # data and rejects the rest) the server MUST initialize the `partial_success`
+    # field and MUST set the `rejected_<signal>` with the number of items it
+    # rejected. Servers MAY also make use of the `partial_success` field to
+    # convey warnings/suggestions to senders even when the request was fully
+    # accepted. In such cases, the `rejected_<signal>` MUST have a value of `0`
+    # and the `error_message` MUST be non-empty. A `partial_success` message with
+    # an empty value (rejected_<signal> = 0 and `error_message` = "") is
+    # equivalent to it not being set/present. Senders SHOULD interpret it the
+    # same way as in the full success case.
+ partial_success: "ExportLogsPartialSuccess" = betterproto.message_field(1) + + +@dataclass +class ExportLogsPartialSuccess(betterproto.Message): + # The number of rejected log records. A `rejected_` field holding a + # `0` value indicates that the request was fully accepted. + rejected_log_records: int = betterproto.int64_field(1) + # A developer-facing human-readable message in English. It should be used + # either to explain why the server rejected parts of the data during a + # partial success or to convey warnings/suggestions during a full success. + # The message should offer guidance on how users can address such issues. + # error_message is an optional field. An error_message with an empty value is + # equivalent to it not being set. + error_message: str = betterproto.string_field(2) + + +class LogsServiceStub(betterproto.ServiceStub): + """ + Service that can be used to push logs between one Application instrumented + with OpenTelemetry and an collector, or between an collector and a central + collector (in this case logs are sent/received to/from multiple + Applications). + """ + + async def export( + self, *, resource_logs: List[v1.ResourceLogs] = [] + ) -> ExportLogsServiceResponse: + """ + For performance reasons, it is recommended to keep this RPC alive for + the entire life of the application. + """ + + request = ExportLogsServiceRequest() + if resource_logs is not None: + request.resource_logs = resource_logs + + return await self._unary_unary( + "/opentelemetry.proto.collector.logs.v1.LogsService/Export", + request, + ExportLogsServiceResponse, + ) diff --git a/src/opentelemetry/proto/collector/metrics/v1/__init__.py b/src/opentelemetry/proto/collector/metrics/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/opentelemetry/proto/collector/metrics/v1/metrics_service_pb2.py b/src/opentelemetry/proto/collector/metrics/v1/metrics_service_pb2.py new file mode 100644 index 0000000..9ab3835 --- /dev/null +++ b/src/opentelemetry/proto/collector/metrics/v1/metrics_service_pb2.py @@ -0,0 +1,76 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# sources: opentelemetry/proto/collector/metrics/v1/metrics_service.proto +# plugin: python-betterproto +from dataclasses import dataclass +from typing import List + +import betterproto +import grpclib + +from opentelemetry.proto.metrics.v1 import metrics_pb2 as v1 + + +@dataclass +class ExportMetricsServiceRequest(betterproto.Message): + # An array of ResourceMetrics. For data coming from a single resource this + # array will typically contain one element. Intermediary nodes (such as + # OpenTelemetry Collector) that receive data from multiple origins typically + # batch the data before forwarding further and in that case this array will + # contain multiple elements. + resource_metrics: List[v1.ResourceMetrics] = betterproto.message_field(1) + + +@dataclass +class ExportMetricsServiceResponse(betterproto.Message): + # The details of a partially successful export request. If the request is + # only partially accepted (i.e. when the server accepts only parts of the + # data and rejects the rest) the server MUST initialize the `partial_success` + # field and MUST set the `rejected_` with the number of items it + # rejected. Servers MAY also make use of the `partial_success` field to + # convey warnings/suggestions to senders even when the request was fully + # accepted. In such cases, the `rejected_` MUST have a value of `0` + # and the `error_message` MUST be non-empty. 
A `partial_success` message with + # an empty value (rejected_ = 0 and `error_message` = "") is + # equivalent to it not being set/present. Senders SHOULD interpret it the + # same way as in the full success case. + partial_success: "ExportMetricsPartialSuccess" = betterproto.message_field(1) + + +@dataclass +class ExportMetricsPartialSuccess(betterproto.Message): + # The number of rejected data points. A `rejected_` field holding a + # `0` value indicates that the request was fully accepted. + rejected_data_points: int = betterproto.int64_field(1) + # A developer-facing human-readable message in English. It should be used + # either to explain why the server rejected parts of the data during a + # partial success or to convey warnings/suggestions during a full success. + # The message should offer guidance on how users can address such issues. + # error_message is an optional field. An error_message with an empty value is + # equivalent to it not being set. + error_message: str = betterproto.string_field(2) + + +class MetricsServiceStub(betterproto.ServiceStub): + """ + Service that can be used to push metrics between one Application + instrumented with OpenTelemetry and a collector, or between a collector and + a central collector. + """ + + async def export( + self, *, resource_metrics: List[v1.ResourceMetrics] = [] + ) -> ExportMetricsServiceResponse: + """ + For performance reasons, it is recommended to keep this RPC alive for + the entire life of the application. + """ + + request = ExportMetricsServiceRequest() + if resource_metrics is not None: + request.resource_metrics = resource_metrics + + return await self._unary_unary( + "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", + request, + ExportMetricsServiceResponse, + ) diff --git a/src/opentelemetry/proto/collector/trace/v1/__init__.py b/src/opentelemetry/proto/collector/trace/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/opentelemetry/proto/collector/trace/v1/trace_service_pb2.py b/src/opentelemetry/proto/collector/trace/v1/trace_service_pb2.py new file mode 100644 index 0000000..f916dfc --- /dev/null +++ b/src/opentelemetry/proto/collector/trace/v1/trace_service_pb2.py @@ -0,0 +1,77 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# sources: opentelemetry/proto/collector/trace/v1/trace_service.proto +# plugin: python-betterproto +from dataclasses import dataclass +from typing import List + +import betterproto +import grpclib + +from opentelemetry.proto.trace.v1 import trace_pb2 as v1 + + +@dataclass +class ExportTraceServiceRequest(betterproto.Message): + # An array of ResourceSpans. For data coming from a single resource this + # array will typically contain one element. Intermediary nodes (such as + # OpenTelemetry Collector) that receive data from multiple origins typically + # batch the data before forwarding further and in that case this array will + # contain multiple elements. + resource_spans: List[v1.ResourceSpans] = betterproto.message_field(1) + + +@dataclass +class ExportTraceServiceResponse(betterproto.Message): + # The details of a partially successful export request. If the request is + # only partially accepted (i.e. when the server accepts only parts of the + # data and rejects the rest) the server MUST initialize the `partial_success` + # field and MUST set the `rejected_` with the number of items it + # rejected. 
Servers MAY also make use of the `partial_success` field to + # convey warnings/suggestions to senders even when the request was fully + # accepted. In such cases, the `rejected_` MUST have a value of `0` + # and the `error_message` MUST be non-empty. A `partial_success` message with + # an empty value (rejected_ = 0 and `error_message` = "") is + # equivalent to it not being set/present. Senders SHOULD interpret it the + # same way as in the full success case. + partial_success: "ExportTracePartialSuccess" = betterproto.message_field(1) + + +@dataclass +class ExportTracePartialSuccess(betterproto.Message): + # The number of rejected spans. A `rejected_` field holding a `0` + # value indicates that the request was fully accepted. + rejected_spans: int = betterproto.int64_field(1) + # A developer-facing human-readable message in English. It should be used + # either to explain why the server rejected parts of the data during a + # partial success or to convey warnings/suggestions during a full success. + # The message should offer guidance on how users can address such issues. + # error_message is an optional field. An error_message with an empty value is + # equivalent to it not being set. + error_message: str = betterproto.string_field(2) + + +class TraceServiceStub(betterproto.ServiceStub): + """ + Service that can be used to push spans between one Application instrumented + with OpenTelemetry and a collector, or between a collector and a central + collector (in this case spans are sent/received to/from multiple + Applications). + """ + + async def export( + self, *, resource_spans: List[v1.ResourceSpans] = [] + ) -> ExportTraceServiceResponse: + """ + For performance reasons, it is recommended to keep this RPC alive for + the entire life of the application. + """ + + request = ExportTraceServiceRequest() + if resource_spans is not None: + request.resource_spans = resource_spans + + return await self._unary_unary( + "/opentelemetry.proto.collector.trace.v1.TraceService/Export", + request, + ExportTraceServiceResponse, + ) diff --git a/src/opentelemetry/proto/common/v1/__init__.py b/src/opentelemetry/proto/common/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/opentelemetry/proto/common/v1/common_pb2.py b/src/opentelemetry/proto/common/v1/common_pb2.py new file mode 100644 index 0000000..c3dd744 --- /dev/null +++ b/src/opentelemetry/proto/common/v1/common_pb2.py @@ -0,0 +1,79 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# sources: opentelemetry/proto/common/v1/common.proto +# plugin: python-betterproto +from dataclasses import dataclass +from typing import List + +import betterproto + + +@dataclass +class AnyValue(betterproto.Message): + """ + AnyValue is used to represent any type of attribute value. AnyValue may + contain a primitive value such as a string or integer or it may contain an + arbitrary nested object containing arrays, key-value lists and primitives. + """ + + string_value: str = betterproto.string_field(1, group="value") + bool_value: bool = betterproto.bool_field(2, group="value") + int_value: int = betterproto.int64_field(3, group="value") + double_value: float = betterproto.double_field(4, group="value") + array_value: "ArrayValue" = betterproto.message_field(5, group="value") + kvlist_value: "KeyValueList" = betterproto.message_field(6, group="value") + bytes_value: bytes = betterproto.bytes_field(7, group="value") + + +@dataclass +class ArrayValue(betterproto.Message): + """ + ArrayValue is a list of AnyValue messages. 
We need ArrayValue as a message + since oneof in AnyValue does not allow repeated fields. + """ + + # Array of values. The array may be empty (contain 0 elements). + values: List["AnyValue"] = betterproto.message_field(1) + + +@dataclass +class KeyValueList(betterproto.Message): + """ + KeyValueList is a list of KeyValue messages. We need KeyValueList as a + message since `oneof` in AnyValue does not allow repeated fields. + Everywhere else where we need a list of KeyValue messages (e.g. in Span) we + use `repeated KeyValue` directly to avoid unnecessary extra wrapping (which + slows down the protocol). The 2 approaches are semantically equivalent. + """ + + # A collection of key/value pairs. The list may be empty + # (may contain 0 elements). The keys MUST be unique (it is not allowed to + # have more than one value with the same key). + values: List["KeyValue"] = betterproto.message_field(1) + + +@dataclass +class KeyValue(betterproto.Message): + """ + KeyValue is a key-value pair that is used to store Span attributes, Link + attributes, etc. + """ + + key: str = betterproto.string_field(1) + value: "AnyValue" = betterproto.message_field(2) + + +@dataclass +class InstrumentationScope(betterproto.Message): + """ + InstrumentationScope is a message representing the instrumentation scope + information such as the fully qualified name and version. + """ + + # An empty instrumentation scope name means the name is unknown. + name: str = betterproto.string_field(1) + version: str = betterproto.string_field(2) + # Additional attributes that describe the scope. [Optional]. Attribute keys + # MUST be unique (it is not allowed to have more than one attribute with the + # same key). + attributes: List["KeyValue"] = betterproto.message_field(3) + dropped_attributes_count: int = betterproto.uint32_field(4) diff --git a/src/opentelemetry/proto/logs/v1/__init__.py b/src/opentelemetry/proto/logs/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/opentelemetry/proto/logs/v1/logs_pb2.py b/src/opentelemetry/proto/logs/v1/logs_pb2.py new file mode 100644 index 0000000..e083186 --- /dev/null +++ b/src/opentelemetry/proto/logs/v1/logs_pb2.py @@ -0,0 +1,177 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# sources: opentelemetry/proto/logs/v1/logs.proto +# plugin: python-betterproto +from dataclasses import dataclass +from typing import List + +import betterproto + +from opentelemetry.proto.common.v1 import common_pb2 +from opentelemetry.proto.resource.v1 import resource_pb2 + + +class SeverityNumber(betterproto.Enum): + """Possible values for LogRecord.SeverityNumber.""" + + # UNSPECIFIED is the default SeverityNumber, it MUST NOT be used.
+ SEVERITY_NUMBER_UNSPECIFIED = 0 + SEVERITY_NUMBER_TRACE = 1 + SEVERITY_NUMBER_TRACE2 = 2 + SEVERITY_NUMBER_TRACE3 = 3 + SEVERITY_NUMBER_TRACE4 = 4 + SEVERITY_NUMBER_DEBUG = 5 + SEVERITY_NUMBER_DEBUG2 = 6 + SEVERITY_NUMBER_DEBUG3 = 7 + SEVERITY_NUMBER_DEBUG4 = 8 + SEVERITY_NUMBER_INFO = 9 + SEVERITY_NUMBER_INFO2 = 10 + SEVERITY_NUMBER_INFO3 = 11 + SEVERITY_NUMBER_INFO4 = 12 + SEVERITY_NUMBER_WARN = 13 + SEVERITY_NUMBER_WARN2 = 14 + SEVERITY_NUMBER_WARN3 = 15 + SEVERITY_NUMBER_WARN4 = 16 + SEVERITY_NUMBER_ERROR = 17 + SEVERITY_NUMBER_ERROR2 = 18 + SEVERITY_NUMBER_ERROR3 = 19 + SEVERITY_NUMBER_ERROR4 = 20 + SEVERITY_NUMBER_FATAL = 21 + SEVERITY_NUMBER_FATAL2 = 22 + SEVERITY_NUMBER_FATAL3 = 23 + SEVERITY_NUMBER_FATAL4 = 24 + + +class LogRecordFlags(betterproto.Enum): + """ + LogRecordFlags represents constants used to interpret the LogRecord.flags + field, which is protobuf 'fixed32' type and is to be used as bit-fields. + Each non-zero value defined in this enum is a bit-mask. To extract the + bit-field, for example, use an expression like: (logRecord.flags & + LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) + """ + + # The zero value for the enum. Should not be used for comparisons. Instead + # use bitwise "and" with the appropriate mask as shown above. + LOG_RECORD_FLAGS_DO_NOT_USE = 0 + # Bits 0-7 are used for trace flags. + LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 255 + + +@dataclass +class LogsData(betterproto.Message): + """ + LogsData represents the logs data that can be stored in a persistent + storage, OR can be embedded by other protocols that transfer OTLP logs data + but do not implement the OTLP protocol. The main difference between this + message and collector protocol is that in this message there will not be + any "control" or "metadata" specific to OTLP protocol. When new fields are + added into this message, the OTLP request MUST be updated as well. + """ + + # An array of ResourceLogs. For data coming from a single resource this array + # will typically contain one element. Intermediary nodes that receive data + # from multiple origins typically batch the data before forwarding further + # and in that case this array will contain multiple elements. + resource_logs: List["ResourceLogs"] = betterproto.message_field(1) + + +@dataclass +class ResourceLogs(betterproto.Message): + """A collection of ScopeLogs from a Resource.""" + + # The resource for the logs in this message. If this field is not set then + # resource info is unknown. + resource: resource_pb2.Resource = betterproto.message_field(1) + # A list of ScopeLogs that originate from a resource. + scope_logs: List["ScopeLogs"] = betterproto.message_field(2) + # The Schema URL, if known. This is the identifier of the Schema that the + # resource data is recorded in. To learn more about Schema URL see + # https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This + # schema_url applies to the data in the "resource" field. It does not apply + # to the data in the "scope_logs" field which have their own schema_url + # field. + schema_url: str = betterproto.string_field(3) + + +@dataclass +class ScopeLogs(betterproto.Message): + """A collection of Logs produced by a Scope.""" + + # The instrumentation scope information for the logs in this message. + # Semantically when InstrumentationScope isn't set, it is equivalent with an + # empty instrumentation scope name (unknown). + scope: common_pb2.InstrumentationScope = betterproto.message_field(1) + # A list of log records. 
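An illustrative aside before the remaining LogRecord fields: the 4-step spacing of SeverityNumber (TRACE=1, DEBUG=5, INFO=9, ...) leaves room between the stdlib's coarser levels, and the flags mask is applied exactly as the LogRecordFlags docstring above describes. The level mapping below is a plausible choice, not one mandated by the proto:

import logging

from opentelemetry.proto.logs.v1.logs_pb2 import LogRecordFlags, SeverityNumber

# Hypothetical mapping from stdlib logging levels to OTLP severities.
LEVEL_TO_SEVERITY = {
    logging.DEBUG: SeverityNumber.SEVERITY_NUMBER_DEBUG,
    logging.INFO: SeverityNumber.SEVERITY_NUMBER_INFO,
    logging.WARNING: SeverityNumber.SEVERITY_NUMBER_WARN,
    logging.ERROR: SeverityNumber.SEVERITY_NUMBER_ERROR,
    logging.CRITICAL: SeverityNumber.SEVERITY_NUMBER_FATAL,
}


def w3c_trace_flags(flags: int) -> int:
    # Extract bits 0-7 with the mask, as the enum docstring instructs.
    return flags & LogRecordFlags.LOG_RECORD_FLAGS_TRACE_FLAGS_MASK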
+ log_records: List["LogRecord"] = betterproto.message_field(2) + # The Schema URL, if known. This is the identifier of the Schema that the log + # data is recorded in. To learn more about Schema URL see + # https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This + # schema_url applies to all logs in the "logs" field. + schema_url: str = betterproto.string_field(3) + + +@dataclass +class LogRecord(betterproto.Message): + """ + A log record according to OpenTelemetry Log Data Model: + https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data- + model.md + """ + + # time_unix_nano is the time when the event occurred. Value is UNIX Epoch + # time in nanoseconds since 00:00:00 UTC on 1 January 1970. Value of 0 + # indicates unknown or missing timestamp. + time_unix_nano: float = betterproto.fixed64_field(1) + # Time when the event was observed by the collection system. For events that + # originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) this + # timestamp is typically set at the generation time and is equal to + # Timestamp. For events originating externally and collected by OpenTelemetry + # (e.g. using Collector) this is the time when OpenTelemetry's code observed + # the event measured by the clock of the OpenTelemetry code. This field MUST + # be set once the event is observed by OpenTelemetry. For converting + # OpenTelemetry log data to formats that support only one timestamp or when + # receiving OpenTelemetry log data by recipients that support only one + # timestamp internally the following logic is recommended: - Use + # time_unix_nano if it is present, otherwise use observed_time_unix_nano. + # Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + # 1970. Value of 0 indicates unknown or missing timestamp. + observed_time_unix_nano: float = betterproto.fixed64_field(11) + # Numerical value of the severity, normalized to values described in Log Data + # Model. [Optional]. + severity_number: "SeverityNumber" = betterproto.enum_field(2) + # The severity text (also known as log level). The original string + # representation as it is known at the source. [Optional]. + severity_text: str = betterproto.string_field(3) + # A value containing the body of the log record. Can be for example a human- + # readable string message (including multi-line) describing the event in a + # free form or it can be a structured data composed of arrays and maps of + # other values. [Optional]. + body: common_pb2.AnyValue = betterproto.message_field(5) + # Additional attributes that describe the specific event occurrence. + # [Optional]. Attribute keys MUST be unique (it is not allowed to have more + # than one attribute with the same key). + attributes: List[common_pb2.KeyValue] = betterproto.message_field(6) + dropped_attributes_count: int = betterproto.uint32_field(7) + # Flags, a bit field. 8 least significant bits are the trace flags as defined + # in W3C Trace Context specification. 24 most significant bits are reserved + # and must be set to 0. Readers must not assume that 24 most significant bits + # will be zero and must correctly mask the bits when reading 8-bit trace flag + # (use flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. + flags: float = betterproto.fixed32_field(8) + # A unique identifier for a trace. All logs from the same trace share the + # same `trace_id`. The ID is a 16-byte array. 
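The single-timestamp recommendation in the comment above reduces to a one-liner; a sketch (an editor's addition, treating 0 as "unset" per the comment):

from opentelemetry.proto.logs.v1.logs_pb2 import LogRecord


def effective_time_unix_nano(record: LogRecord) -> int:
    # Use time_unix_nano if present, otherwise observed_time_unix_nano.
    return int(record.time_unix_nano) or int(record.observed_time_unix_nano)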
An ID with all zeroes OR of + # length other than 16 bytes is considered invalid (empty string in OTLP/JSON + # is zero-length and thus is also invalid). This field is optional. The + # receivers SHOULD assume that the log record is not associated with a trace + # if any of the following is true: - the field is not present, - the + # field contains an invalid value. + trace_id: bytes = betterproto.bytes_field(9) + # A unique identifier for a span within a trace, assigned when the span is + # created. The ID is an 8-byte array. An ID with all zeroes OR of length + # other than 8 bytes is considered invalid (empty string in OTLP/JSON is + # zero-length and thus is also invalid). This field is optional. If the + # sender specifies a valid span_id then it SHOULD also specify a valid + # trace_id. The receivers SHOULD assume that the log record is not associated + # with a span if any of the following is true: - the field is not present, + # - the field contains an invalid value. + span_id: bytes = betterproto.bytes_field(10) diff --git a/src/opentelemetry/proto/metrics/v1/__init__.py b/src/opentelemetry/proto/metrics/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/opentelemetry/proto/metrics/v1/metrics_pb2.py b/src/opentelemetry/proto/metrics/v1/metrics_pb2.py new file mode 100644 index 0000000..b51fac7 --- /dev/null +++ b/src/opentelemetry/proto/metrics/v1/metrics_pb2.py @@ -0,0 +1,561 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# sources: opentelemetry/proto/metrics/v1/metrics.proto +# plugin: python-betterproto +from dataclasses import dataclass +from typing import List + +import betterproto + +from opentelemetry.proto.common.v1 import common_pb2 +from opentelemetry.proto.resource.v1 import resource_pb2 + + +class AggregationTemporality(betterproto.Enum): + """ + AggregationTemporality defines how a metric aggregator reports aggregated + values. It describes how those values relate to the time interval over + which they are aggregated. + """ + + # UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AGGREGATION_TEMPORALITY_UNSPECIFIED = 0 + # DELTA is an AggregationTemporality for a metric aggregator which reports + # changes since last report time. Successive metrics contain aggregation of + # values from continuous and non-overlapping intervals. The values for a + # DELTA metric are based only on the time interval associated with one + # measurement cycle. There is no dependency on previous measurements like is + # the case for CUMULATIVE metrics. For example, consider a system measuring + # the number of requests that it receives and reports the sum of these + # requests every second as a DELTA metric: 1. The system starts receiving + # at time=t_0. 2. A request is received, the system measures 1 request. + # 3. A request is received, the system measures 1 request. 4. A request is + # received, the system measures 1 request. 5. The 1 second collection cycle + # ends. A metric is exported for the number of requests received over + # the interval of time t_0 to t_0+1 with a value of 3. 6. A request is + # received, the system measures 1 request. 7. A request is received, the + # system measures 1 request. 8. The 1 second collection cycle ends. A + # metric is exported for the number of requests received over the + # interval of time t_0+1 to t_0+2 with a value of 2. + AGGREGATION_TEMPORALITY_DELTA = 1 + # CUMULATIVE is an AggregationTemporality for a metric aggregator which + # reports changes since a fixed start time. 
This means that current values of + # a CUMULATIVE metric depend on all previous measurements since the start + # time. Because of this, the sender is required to retain this state in some + # form. If this state is lost or invalidated, the CUMULATIVE metric values + # MUST be reset and a new fixed start time following the last reported + # measurement time sent MUST be used. For example, consider a system + # measuring the number of requests that it receives and reports the sum of + # these requests every second as a CUMULATIVE metric: 1. The system starts + # receiving at time=t_0. 2. A request is received, the system measures 1 + # request. 3. A request is received, the system measures 1 request. 4. A + # request is received, the system measures 1 request. 5. The 1 second + # collection cycle ends. A metric is exported for the number of requests + # received over the interval of time t_0 to t_0+1 with a value of 3. + # 6. A request is received, the system measures 1 request. 7. A request is + # received, the system measures 1 request. 8. The 1 second collection cycle + # ends. A metric is exported for the number of requests received over + # the interval of time t_0 to t_0+2 with a value of 5. 9. The system + # experiences a fault and loses state. 10. The system recovers and resumes + # receiving at time=t_1. 11. A request is received, the system measures 1 + # request. 12. The 1 second collection cycle ends. A metric is exported for + # the number of requests received over the interval of time t_1 to + # t_1+1 with a value of 1. Note: Even though reporting changes since the last + # report time with CUMULATIVE is valid, it is not recommended. This + # may cause problems for systems that do not use start_time to determine when + # the aggregation value was reset (e.g. Prometheus). + AGGREGATION_TEMPORALITY_CUMULATIVE = 2 + + +class DataPointFlags(betterproto.Enum): + """ + DataPointFlags is defined as a protobuf 'uint32' type and is to be used as + a bit-field representing 32 distinct boolean flags. Each flag defined in + this enum is a bit-mask. To test the presence of a single flag in the + flags of a data point, for example, use an expression like: (point.flags + & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == + DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK + """ + + # The zero value for the enum. Should not be used for comparisons. Instead + # use bitwise "and" with the appropriate mask as shown above. + DATA_POINT_FLAGS_DO_NOT_USE = 0 + # This DataPoint is valid but has no recorded value. This value SHOULD be + # used to reflect explicitly missing data in a series, as for an equivalent + # to the Prometheus "staleness marker". + DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1 + + +@dataclass +class MetricsData(betterproto.Message): + """ + MetricsData represents the metrics data that can be stored in a persistent + storage, OR can be embedded by other protocols that transfer OTLP metrics + data but do not implement the OTLP protocol.
+ MetricsData
+ └─── ResourceMetrics
+    ├── Resource
+    ├── SchemaURL
+    └── ScopeMetrics
+       ├── Scope
+       ├── SchemaURL
+       └── Metric
+          ├── Name
+          ├── Description
+          ├── Unit
+          └── data
+             ├── Gauge
+             ├── Sum
+             ├── Histogram
+             ├── ExponentialHistogram
+             └── Summary
+ The main difference between this message and collector protocol + is that in this message there will not be any "control" or "metadata" + specific to OTLP protocol. When new fields are added into this message, the + OTLP request MUST be updated as well. + """ + + # An array of ResourceMetrics.
For data coming from a single resource this + # array will typically contain one element. Intermediary nodes that receive + # data from multiple origins typically batch the data before forwarding + # further and in that case this array will contain multiple elements. + resource_metrics: List["ResourceMetrics"] = betterproto.message_field(1) + + +@dataclass +class ResourceMetrics(betterproto.Message): + """A collection of ScopeMetrics from a Resource.""" + + # The resource for the metrics in this message. If this field is not set then + # no resource info is known. + resource: resource_pb2.Resource = betterproto.message_field(1) + # A list of metrics that originate from a resource. + scope_metrics: List["ScopeMetrics"] = betterproto.message_field(2) + # The Schema URL, if known. This is the identifier of the Schema that the + # resource data is recorded in. To learn more about Schema URL see + # https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This + # schema_url applies to the data in the "resource" field. It does not apply + # to the data in the "scope_metrics" field which have their own schema_url + # field. + schema_url: str = betterproto.string_field(3) + + +@dataclass +class ScopeMetrics(betterproto.Message): + """A collection of Metrics produced by a Scope.""" + + # The instrumentation scope information for the metrics in this message. + # Semantically when InstrumentationScope isn't set, it is equivalent with an + # empty instrumentation scope name (unknown). + scope: common_pb2.InstrumentationScope = betterproto.message_field(1) + # A list of metrics that originate from an instrumentation library. + metrics: List["Metric"] = betterproto.message_field(2) + # The Schema URL, if known. This is the identifier of the Schema that the + # metric data is recorded in. To learn more about Schema URL see + # https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This + # schema_url applies to all metrics in the "metrics" field. + schema_url: str = betterproto.string_field(3) + + +@dataclass +class Metric(betterproto.Message): + """ + Defines a Metric which has one or more timeseries. The following is a + brief summary of the Metric data model. For more details, see: + https://github.com/open-telemetry/opentelemetry- + specification/blob/main/specification/metrics/data-model.md The data model + and relation between entities is shown in the diagram below. Here, + "DataPoint" is the term used to refer to any one of the specific data point + value types, and "points" is the term used to refer to any one of the lists + of points contained in the Metric. - Metric is composed of metadata and + data. - Metadata part contains a name, description, unit. - Data is one of + the possible types (Sum, Gauge, Histogram, Summary). - DataPoint contains + timestamps, attributes, and one of the possible value type fields. + [ASCII diagram elided in this patch rendering: a Metric box holds name, + description and unit, and its data field points to one of Gauge, Sum, + Histogram, Summary, ...; the chosen data message holds the `points` list of + DataPoints 1..M, and each DataPoint holds its label/value attribute pairs + plus the point value.] Each distinct type of DataPoint represents + the output of a specific aggregation function, the result of applying the + DataPoint's associated function to one or more measurements. All + DataPoint types have three common fields: - Attributes includes key-value + pairs associated with the data point - TimeUnixNano is required, set to the + end time of the aggregation - StartTimeUnixNano is optional, but strongly + encouraged for DataPoints having an AggregationTemporality field, as + discussed below. Both TimeUnixNano and StartTimeUnixNano values are + expressed as UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + 1970. # TimeUnixNano This field is required, having consistent + interpretation across DataPoint types. TimeUnixNano is the moment + corresponding to when the data point's aggregate value was captured. Data + points with the 0 value for TimeUnixNano SHOULD be rejected by consumers. # + StartTimeUnixNano StartTimeUnixNano in general allows detecting when a + sequence of observations is unbroken. This field indicates to consumers + the start time for points with cumulative and delta AggregationTemporality, + and it should be included whenever possible to support correct rate + calculation. Although it may be omitted when the start time is truly + unknown, setting StartTimeUnixNano is strongly encouraged. + """ + + # name of the metric. + name: str = betterproto.string_field(1) + # description of the metric, which can be used in documentation. + description: str = betterproto.string_field(2) + # unit in which the metric value is reported. Follows the format described by + # http://unitsofmeasure.org/ucum.html. + unit: str = betterproto.string_field(3) + gauge: "Gauge" = betterproto.message_field(5, group="data") + sum: "Sum" = betterproto.message_field(7, group="data") + histogram: "Histogram" = betterproto.message_field(9, group="data") + exponential_histogram: "ExponentialHistogram" = betterproto.message_field( + 10, group="data" + ) + summary: "Summary" = betterproto.message_field(11, group="data") + # Additional metadata attributes that describe the metric. [Optional]. + # Attributes are non-identifying. Consumers SHOULD NOT need to be aware of + # these attributes. These attributes MAY be used to encode information + # allowing for lossless roundtrip translation to / from another data model. + # Attribute keys MUST be unique (it is not allowed to have more than one + # attribute with the same key). + metadata: List[common_pb2.KeyValue] = betterproto.message_field(12) + + +@dataclass +class Gauge(betterproto.Message): + """ + Gauge represents the type of a scalar metric that always exports the + "current value" for every data point. It should be used for an "unknown" + aggregation. A Gauge does not support different aggregation temporalities. + Given the aggregation is unknown, points cannot be combined using the same + aggregation, regardless of aggregation temporalities. Therefore, + AggregationTemporality is not included. Consequently, this also means + "StartTimeUnixNano" is ignored for all data points.
+ """ + + data_points: List["NumberDataPoint"] = betterproto.message_field(1) + + +@dataclass +class Sum(betterproto.Message): + """ + Sum represents the type of a scalar metric that is calculated as a sum of + all reported measurements over a time interval. + """ + + data_points: List["NumberDataPoint"] = betterproto.message_field(1) + # aggregation_temporality describes if the aggregator reports delta changes + # since last report time, or cumulative changes since a fixed start time. + aggregation_temporality: "AggregationTemporality" = betterproto.enum_field(2) + # If "true", the sum is monotonic. + is_monotonic: bool = betterproto.bool_field(3) + + +@dataclass +class Histogram(betterproto.Message): + """ + Histogram represents the type of a metric that is calculated by aggregating + as a Histogram of all reported measurements over a time interval. + """ + + data_points: List["HistogramDataPoint"] = betterproto.message_field(1) + # aggregation_temporality describes if the aggregator reports delta changes + # since last report time, or cumulative changes since a fixed start time. + aggregation_temporality: "AggregationTemporality" = betterproto.enum_field(2) + + +@dataclass +class ExponentialHistogram(betterproto.Message): + """ + ExponentialHistogram represents the type of a metric that is calculated by + aggregating as an ExponentialHistogram of all reported double measurements + over a time interval. + """ + + data_points: List["ExponentialHistogramDataPoint"] = betterproto.message_field(1) + # aggregation_temporality describes if the aggregator reports delta changes + # since last report time, or cumulative changes since a fixed start time. + aggregation_temporality: "AggregationTemporality" = betterproto.enum_field(2) + + +@dataclass +class Summary(betterproto.Message): + """ + Summary metric data are used to convey quantile summaries, a Prometheus + (see: https://prometheus.io/docs/concepts/metric_types/#summary) and + OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4db + f6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) data + type. These data points cannot always be merged in a meaningful way. While + they can be useful in some applications, histogram data points are + recommended for new applications. + """ + + data_points: List["SummaryDataPoint"] = betterproto.message_field(1) + + +@dataclass +class NumberDataPoint(betterproto.Message): + """ + NumberDataPoint is a single data point in a timeseries that describes the + time-varying scalar value of a metric. + """ + + # The set of key/value pairs that uniquely identify the timeseries from where + # this point belongs. The list may be empty (may contain 0 elements). + # Attribute keys MUST be unique (it is not allowed to have more than one + # attribute with the same key). + attributes: List[common_pb2.KeyValue] = betterproto.message_field(7) + # StartTimeUnixNano is optional but strongly encouraged, see the detailed + # comments above Metric. Value is UNIX Epoch time in nanoseconds since + # 00:00:00 UTC on 1 January 1970. + start_time_unix_nano: float = betterproto.fixed64_field(2) + # TimeUnixNano is required, see the detailed comments above Metric. Value is + # UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
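To make the temporality and value-oneof machinery concrete, here is a hedged sketch (an editor's addition) of one one-second DELTA collection cycle expressed as a Sum with a single NumberDataPoint; the metric name and values are invented for illustration:

import time

from opentelemetry.proto.metrics.v1.metrics_pb2 import (
    AggregationTemporality,
    Metric,
    NumberDataPoint,
    Sum,
)

now = time.time_ns()
point = NumberDataPoint(
    start_time_unix_nano=now - 1_000_000_000,  # the cycle began one second ago
    time_unix_nano=now,
    as_int=3,  # sets the "value" oneof; use as_double for floating point
)
metric = Metric(
    name="http.server.request_count",
    unit="{request}",
    sum=Sum(
        data_points=[point],
        aggregation_temporality=AggregationTemporality.AGGREGATION_TEMPORALITY_DELTA,
        is_monotonic=True,
    ),
)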
+ time_unix_nano: float = betterproto.fixed64_field(3) + as_double: float = betterproto.double_field(4, group="value") + as_int: float = betterproto.sfixed64_field(6, group="value") + # (Optional) List of exemplars collected from measurements that were used to + # form the data point. + exemplars: List["Exemplar"] = betterproto.message_field(5) + # Flags that apply to this specific data point. See DataPointFlags for the + # available flags and their meaning. + flags: int = betterproto.uint32_field(8) + + +@dataclass +class HistogramDataPoint(betterproto.Message): + """ + HistogramDataPoint is a single data point in a timeseries that describes + the time-varying values of a Histogram. A Histogram contains summary + statistics for a population of values; it may optionally contain the + distribution of those values across a set of buckets. If the histogram + contains the distribution of values, then both "explicit_bounds" and + "bucket_counts" fields must be defined. If the histogram does not contain + the distribution of values, then both "explicit_bounds" and "bucket_counts" + must be omitted and only "count" and "sum" are known. + """ + + # The set of key/value pairs that uniquely identify the timeseries from where + # this point belongs. The list may be empty (may contain 0 elements). + # Attribute keys MUST be unique (it is not allowed to have more than one + # attribute with the same key). + attributes: List[common_pb2.KeyValue] = betterproto.message_field(9) + # StartTimeUnixNano is optional but strongly encouraged, see the detailed + # comments above Metric. Value is UNIX Epoch time in nanoseconds since + # 00:00:00 UTC on 1 January 1970. + start_time_unix_nano: float = betterproto.fixed64_field(2) + # TimeUnixNano is required, see the detailed comments above Metric. Value is + # UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + time_unix_nano: float = betterproto.fixed64_field(3) + # count is the number of values in the population. Must be non-negative. This + # value must be equal to the sum of the "count" fields in buckets if a + # histogram is provided. + count: float = betterproto.fixed64_field(4) + # sum of the values in the population. If count is zero then this field must + # be zero. Note: Sum should only be filled out when measuring non-negative + # discrete events, and is assumed to be monotonic over the values of these + # events. Negative events *can* be recorded, but sum should not be filled out + # when doing so. This is specifically to enforce compatibility w/ + # OpenMetrics, see: https://github.com/OpenObservability/OpenMetrics/blob/mai + # n/specification/OpenMetrics.md#histogram + sum: float = betterproto.double_field(5) + # bucket_counts is an optional field that contains the count values of the + # histogram for each bucket. The sum of the bucket_counts must equal the value + # in the count field. The number of elements in the bucket_counts array must + # be one greater than the number of elements in the explicit_bounds array. + bucket_counts: List[float] = betterproto.fixed64_field(6) + # explicit_bounds specifies buckets with explicitly defined bounds for + # values. The boundaries for bucket at index i are: (-infinity, + # explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] + # for 0 < i < size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i + # == size(explicit_bounds) The values in the explicit_bounds array must be + # strictly increasing.
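The bounds rules above pin the bucket layout down completely: buckets are upper-inclusive and bucket_counts has exactly len(explicit_bounds) + 1 entries. An illustrative index computation (an editor's addition):

import bisect
from typing import List


def bucket_index(explicit_bounds: List[float], value: float) -> int:
    # The bucket for a value is the number of bounds strictly below it:
    # (-inf, b0] is index 0, (b_{i-1}, b_i] is index i, and (b_last, +inf)
    # is index len(explicit_bounds).
    return bisect.bisect_left(explicit_bounds, value)


assert bucket_index([0.0, 5.0, 10.0], 5.0) == 1   # upper boundary is inclusive
assert bucket_index([0.0, 5.0, 10.0], 10.5) == 3  # overflow bucket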
Histogram buckets are inclusive of their upper + # boundary, except the last bucket where the boundary is at infinity. This + # format is intentionally compatible with the OpenMetrics histogram + # definition. + explicit_bounds: List[float] = betterproto.double_field(7) + # (Optional) List of exemplars collected from measurements that were used to + # form the data point. + exemplars: List["Exemplar"] = betterproto.message_field(8) + # Flags that apply to this specific data point. See DataPointFlags for the + # available flags and their meaning. + flags: int = betterproto.uint32_field(10) + # min is the minimum value over (start_time, end_time]. + min: float = betterproto.double_field(11) + # max is the maximum value over (start_time, end_time]. + max: float = betterproto.double_field(12) + + +@dataclass +class ExponentialHistogramDataPoint(betterproto.Message): + """ + ExponentialHistogramDataPoint is a single data point in a timeseries that + describes the time-varying values of an ExponentialHistogram of double + values. An ExponentialHistogram contains summary statistics for a population + of values; it may optionally contain the distribution of those values + across a set of buckets. + """ + + # The set of key/value pairs that uniquely identify the timeseries from where + # this point belongs. The list may be empty (may contain 0 elements). + # Attribute keys MUST be unique (it is not allowed to have more than one + # attribute with the same key). + attributes: List[common_pb2.KeyValue] = betterproto.message_field(1) + # StartTimeUnixNano is optional but strongly encouraged, see the detailed + # comments above Metric. Value is UNIX Epoch time in nanoseconds since + # 00:00:00 UTC on 1 January 1970. + start_time_unix_nano: float = betterproto.fixed64_field(2) + # TimeUnixNano is required, see the detailed comments above Metric. Value is + # UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + time_unix_nano: float = betterproto.fixed64_field(3) + # count is the number of values in the population. Must be non-negative. This + # value must be equal to the sum of the "bucket_counts" values in the + # positive and negative Buckets plus the "zero_count" field. + count: float = betterproto.fixed64_field(4) + # sum of the values in the population. If count is zero then this field must + # be zero. Note: Sum should only be filled out when measuring non-negative + # discrete events, and is assumed to be monotonic over the values of these + # events. Negative events *can* be recorded, but sum should not be filled out + # when doing so. This is specifically to enforce compatibility w/ + # OpenMetrics, see: https://github.com/OpenObservability/OpenMetrics/blob/mai + # n/specification/OpenMetrics.md#histogram + sum: float = betterproto.double_field(5) + # scale describes the resolution of the histogram. Boundaries are located at + # powers of the base, where: base = (2^(2^-scale)) The histogram bucket + # identified by `index`, a signed integer, contains values that are greater + # than (base^index) and less than or equal to (base^(index+1)). The positive + # and negative ranges of the histogram are expressed separately. Negative + # values are mapped by their absolute value into the negative range using the + # same scale as the positive range. scale is not restricted by the protocol, + # as the permissible values depend on the range of the data.
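The base/boundary relationship in the scale comment can be checked numerically; an illustrative sketch (an editor's addition):

def bucket_bounds(scale: int, index: int):
    # base = 2**(2**-scale); bucket `index` covers (base**index, base**(index+1)].
    base = 2.0 ** (2.0 ** -scale)
    return base ** index, base ** (index + 1)


# At scale=0 the base is 2, so bucket 3 covers (8.0, 16.0]. Each +1 of scale
# takes the square root of the base, doubling the bucket resolution.
assert bucket_bounds(0, 3) == (8.0, 16.0)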
+ scale: int = betterproto.sint32_field(6) + # zero_count is the count of values that are either exactly zero or within + # the region considered zero by the instrumentation at the tolerated degree + # of precision. This bucket stores values that cannot be expressed using the + # standard exponential formula as well as values that have been rounded to + # zero. Implementations MAY consider the zero bucket to have probability mass + # equal to (zero_count / count). + zero_count: float = betterproto.fixed64_field(7) + # positive carries the positive range of exponential bucket counts. + positive: "ExponentialHistogramDataPointBuckets" = betterproto.message_field(8) + # negative carries the negative range of exponential bucket counts. + negative: "ExponentialHistogramDataPointBuckets" = betterproto.message_field(9) + # Flags that apply to this specific data point. See DataPointFlags for the + # available flags and their meaning. + flags: int = betterproto.uint32_field(10) + # (Optional) List of exemplars collected from measurements that were used to + # form the data point. + exemplars: List["Exemplar"] = betterproto.message_field(11) + # min is the minimum value over (start_time, end_time]. + min: float = betterproto.double_field(12) + # max is the maximum value over (start_time, end_time]. + max: float = betterproto.double_field(13) + # ZeroThreshold may be optionally set to convey the width of the zero region, + # where the zero region is defined as the closed interval [-ZeroThreshold, + # ZeroThreshold]. When ZeroThreshold is 0, the zero_count bucket stores values + # that cannot be expressed using the standard exponential formula as well as + # values that have been rounded to zero. + zero_threshold: float = betterproto.double_field(14) + + +@dataclass +class ExponentialHistogramDataPointBuckets(betterproto.Message): + """ + Buckets are a set of bucket counts, encoded in a contiguous array of + counts. + """ + + # Offset is the bucket index of the first entry in the bucket_counts array. + # Note: This uses a varint encoding as a simple form of compression. + offset: int = betterproto.sint32_field(1) + # bucket_counts is an array of count values, where bucket_counts[i] carries + # the count of the bucket at index (offset+i). bucket_counts[i] is the count + # of values greater than base^(offset+i) and less than or equal to + # base^(offset+i+1). Note: By contrast, the explicit HistogramDataPoint uses + # fixed64. This field is expected to have many buckets, especially zeros, so + # uint64 has been selected to ensure varint encoding. + bucket_counts: List[int] = betterproto.uint64_field(2) + + +@dataclass +class SummaryDataPoint(betterproto.Message): + """ + SummaryDataPoint is a single data point in a timeseries that describes the + time-varying values of a Summary metric. + """ + + # The set of key/value pairs that uniquely identify the timeseries from where + # this point belongs. The list may be empty (may contain 0 elements). + # Attribute keys MUST be unique (it is not allowed to have more than one + # attribute with the same key). + attributes: List[common_pb2.KeyValue] = betterproto.message_field(7) + # StartTimeUnixNano is optional but strongly encouraged, see the detailed + # comments above Metric. Value is UNIX Epoch time in nanoseconds since + # 00:00:00 UTC on 1 January 1970. + start_time_unix_nano: float = betterproto.fixed64_field(2) + # TimeUnixNano is required, see the detailed comments above Metric.
Value is + # UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + time_unix_nano: float = betterproto.fixed64_field(3) + # count is the number of values in the population. Must be non-negative. + count: float = betterproto.fixed64_field(4) + # sum of the values in the population. If count is zero then this field must + # be zero. Note: Sum should only be filled out when measuring non-negative + # discrete events, and is assumed to be monotonic over the values of these + # events. Negative events *can* be recorded, but sum should not be filled out + # when doing so. This is specifically to enforce compatibility w/ + # OpenMetrics, see: https://github.com/OpenObservability/OpenMetrics/blob/mai + # n/specification/OpenMetrics.md#summary + sum: float = betterproto.double_field(5) + # (Optional) list of values at different quantiles of the distribution + # calculated from the current snapshot. The quantiles must be strictly + # increasing. + quantile_values: List["SummaryDataPointValueAtQuantile"] = ( + betterproto.message_field(6) + ) + # Flags that apply to this specific data point. See DataPointFlags for the + # available flags and their meaning. + flags: int = betterproto.uint32_field(8) + + +@dataclass +class SummaryDataPointValueAtQuantile(betterproto.Message): + """ + Represents the value at a given quantile of a distribution. To record Min + and Max values, the following conventions are used: - The 1.0 quantile is + equivalent to the maximum value observed. - The 0.0 quantile is equivalent + to the minimum value observed. See the following issue for more context: + https://github.com/open-telemetry/opentelemetry-proto/issues/125 + """ + + # The quantile of a distribution. Must be in the interval [0.0, 1.0]. + quantile: float = betterproto.double_field(1) + # The value at the given quantile of a distribution. Quantile values must NOT + # be negative. + value: float = betterproto.double_field(2) + + +@dataclass +class Exemplar(betterproto.Message): + """ + A representation of an exemplar, which is a sample input measurement. + Exemplars also hold information about the environment when the measurement + was recorded, for example the span and trace ID of the active span when the + exemplar was recorded. + """ + + # The set of key/value pairs that were filtered out by the aggregator, but + # recorded alongside the original measurement. Only key/value pairs that were + # filtered out by the aggregator should be included. + filtered_attributes: List[common_pb2.KeyValue] = betterproto.message_field(7) + # time_unix_nano is the exact time when this exemplar was recorded. Value is + # UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + time_unix_nano: float = betterproto.fixed64_field(2) + as_double: float = betterproto.double_field(3, group="value") + as_int: float = betterproto.sfixed64_field(6, group="value") + # (Optional) Span ID of the exemplar trace. span_id may be missing if the + # measurement is not recorded inside a trace or if the trace is not sampled. + span_id: bytes = betterproto.bytes_field(4) + # (Optional) Trace ID of the exemplar trace. trace_id may be missing if the + # measurement is not recorded inside a trace or if the trace is not sampled.
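Given the min/max convention in the SummaryDataPointValueAtQuantile docstring above, recovering the observed extremes is a dictionary lookup; an illustrative sketch (an editor's addition):

from opentelemetry.proto.metrics.v1.metrics_pb2 import SummaryDataPoint


def observed_min_max(point: SummaryDataPoint):
    # The 0.0 and 1.0 quantiles carry the observed minimum and maximum;
    # either may be absent, in which case None is returned for it.
    by_quantile = {q.quantile: q.value for q in point.quantile_values}
    return by_quantile.get(0.0), by_quantile.get(1.0)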
+ trace_id: bytes = betterproto.bytes_field(5) diff --git a/src/opentelemetry/proto/resource/v1/__init__.py b/src/opentelemetry/proto/resource/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/opentelemetry/proto/resource/v1/resource_pb2.py b/src/opentelemetry/proto/resource/v1/resource_pb2.py new file mode 100644 index 0000000..020ce96 --- /dev/null +++ b/src/opentelemetry/proto/resource/v1/resource_pb2.py @@ -0,0 +1,21 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# sources: opentelemetry/proto/resource/v1/resource.proto +# plugin: python-betterproto +from dataclasses import dataclass +from typing import List + +import betterproto + +from opentelemetry.proto.common.v1 import common_pb2 + + +@dataclass +class Resource(betterproto.Message): + """Resource information.""" + + # Set of attributes that describe the resource. Attribute keys MUST be unique + # (it is not allowed to have more than one attribute with the same key). + attributes: List[common_pb2.KeyValue] = betterproto.message_field(1) + # dropped_attributes_count is the number of dropped attributes. If the value + # is 0, then no attributes were dropped. + dropped_attributes_count: int = betterproto.uint32_field(2) diff --git a/src/opentelemetry/proto/trace/v1/__init__.py b/src/opentelemetry/proto/trace/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/opentelemetry/proto/trace/v1/trace_pb2.py b/src/opentelemetry/proto/trace/v1/trace_pb2.py new file mode 100644 index 0000000..6a3c440 --- /dev/null +++ b/src/opentelemetry/proto/trace/v1/trace_pb2.py @@ -0,0 +1,279 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# sources: opentelemetry/proto/trace/v1/trace.proto +# plugin: python-betterproto +from dataclasses import dataclass +from typing import List + +import betterproto + +from opentelemetry.proto.common.v1 import common_pb2 +from opentelemetry.proto.resource.v1 import resource_pb2 + + +class SpanFlags(betterproto.Enum): + """ + SpanFlags represents constants used to interpret the Span.flags field, + which is protobuf 'fixed32' type and is to be used as bit-fields. Each non- + zero value defined in this enum is a bit-mask. To extract the bit-field, + for example, use an expression like: (span.flags & + SPAN_FLAGS_TRACE_FLAGS_MASK) See https://www.w3.org/TR/trace- + context-2/#trace-flags for the flag definitions. Note that Span flags were + introduced in version 1.1 of the OpenTelemetry protocol. Older Span + producers do not set this field, consequently consumers should not rely on + the absence of a particular flag bit to indicate the presence of a + particular feature. + """ + + # The zero value for the enum. Should not be used for comparisons. Instead + # use bitwise "and" with the appropriate mask as shown above. + SPAN_FLAGS_DO_NOT_USE = 0 + # Bits 0-7 are used for trace flags. + SPAN_FLAGS_TRACE_FLAGS_MASK = 255 + # Bits 8 and 9 are used to indicate that the parent span or link span is + # remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. Bit 9 + # (`IS_REMOTE`) indicates whether the span or link is remote. 
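Decoding the two remote bits described above yields a tri-state; an illustrative sketch (an editor's addition) using the mask members defined just below:

from typing import Optional

from opentelemetry.proto.trace.v1.trace_pb2 import SpanFlags


def parent_is_remote(flags: int) -> Optional[bool]:
    # None means unknown (bit 8 clear); otherwise bit 9 answers the question.
    if not flags & SpanFlags.SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK:
        return None
    return bool(flags & SpanFlags.SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK)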
+ SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK = 256 + SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK = 512 + + +class SpanSpanKind(betterproto.Enum): + SPAN_KIND_UNSPECIFIED = 0 + SPAN_KIND_INTERNAL = 1 + SPAN_KIND_SERVER = 2 + SPAN_KIND_CLIENT = 3 + SPAN_KIND_PRODUCER = 4 + SPAN_KIND_CONSUMER = 5 + + +class StatusStatusCode(betterproto.Enum): + STATUS_CODE_UNSET = 0 + STATUS_CODE_OK = 1 + STATUS_CODE_ERROR = 2 + + +@dataclass +class TracesData(betterproto.Message): + """ + TracesData represents the traces data that can be stored in a persistent + storage, OR can be embedded by other protocols that transfer OTLP traces + data but do not implement the OTLP protocol. The main difference between + this message and collector protocol is that in this message there will not + be any "control" or "metadata" specific to OTLP protocol. When new fields + are added into this message, the OTLP request MUST be updated as well. + """ + + # An array of ResourceSpans. For data coming from a single resource this + # array will typically contain one element. Intermediary nodes that receive + # data from multiple origins typically batch the data before forwarding + # further and in that case this array will contain multiple elements. + resource_spans: List["ResourceSpans"] = betterproto.message_field(1) + + +@dataclass +class ResourceSpans(betterproto.Message): + """A collection of ScopeSpans from a Resource.""" + + # The resource for the spans in this message. If this field is not set then + # no resource info is known. + resource: resource_pb2.Resource = betterproto.message_field(1) + # A list of ScopeSpans that originate from a resource. + scope_spans: List["ScopeSpans"] = betterproto.message_field(2) + # The Schema URL, if known. This is the identifier of the Schema that the + # resource data is recorded in. To learn more about Schema URL see + # https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This + # schema_url applies to the data in the "resource" field. It does not apply + # to the data in the "scope_spans" field which have their own schema_url + # field. + schema_url: str = betterproto.string_field(3) + + +@dataclass +class ScopeSpans(betterproto.Message): + """A collection of Spans produced by an InstrumentationScope.""" + + # The instrumentation scope information for the spans in this message. + # Semantically when InstrumentationScope isn't set, it is equivalent with an + # empty instrumentation scope name (unknown). + scope: common_pb2.InstrumentationScope = betterproto.message_field(1) + # A list of Spans that originate from an instrumentation scope. + spans: List["Span"] = betterproto.message_field(2) + # The Schema URL, if known. This is the identifier of the Schema that the + # span data is recorded in. To learn more about Schema URL see + # https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This + # schema_url applies to all spans and span events in the "spans" field. + schema_url: str = betterproto.string_field(3) + + +@dataclass +class Span(betterproto.Message): + """ + A Span represents a single operation performed by a single component of the + system. The next available field id is 17. + """ + + # A unique identifier for a trace. All spans from the same trace share the + # same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR of + # length other than 16 bytes is considered invalid (empty string in OTLP/JSON + # is zero-length and thus is also invalid). This field is required. 
+ trace_id: bytes = betterproto.bytes_field(1) + # A unique identifier for a span within a trace, assigned when the span is + # created. The ID is an 8-byte array. An ID with all zeroes OR of length + # other than 8 bytes is considered invalid (empty string in OTLP/JSON is + # zero-length and thus is also invalid). This field is required. + span_id: bytes = betterproto.bytes_field(2) + # trace_state conveys information about request position in multiple + # distributed tracing graphs. It is a trace_state in w3c-trace-context + # format: https://www.w3.org/TR/trace-context/#tracestate-header See also + # https://github.com/w3c/distributed-tracing for more details about this + # field. + trace_state: str = betterproto.string_field(3) + # The `span_id` of this span's parent span. If this is a root span, then this + # field must be empty. The ID is an 8-byte array. + parent_span_id: bytes = betterproto.bytes_field(4) + # Flags, a bit field. Bits 0-7 (8 least significant bits) are the trace flags + # as defined in W3C Trace Context specification. To read the 8-bit W3C trace + # flag, use `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. See + # https://www.w3.org/TR/trace-context-2/#trace-flags for the flag + # definitions. Bits 8 and 9 represent the 3 states of whether a span's parent + # is remote. The states are (unknown, is not remote, is remote). To read + # whether the value is known, use `(flags & + # SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. To read whether the span is + # remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. When + # creating span messages, if the message is logically forwarded from another + # source with an equivalent flags fields (i.e., usually another OTLP span + # message), the field SHOULD be copied as-is. If creating from a source that + # does not have an equivalent flags field (such as a runtime representation + # of an OpenTelemetry span), the high 22 bits MUST be set to zero. Readers + # MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + # [Optional]. + flags: float = betterproto.fixed32_field(16) + # A description of the span's operation. For example, the name can be a + # qualified method name or a file name and a line number where the operation + # is called. A best practice is to use the same display name at the same call + # point in an application. This makes it easier to correlate spans in + # different traces. This field is semantically required to be set to non- + # empty string. Empty value is equivalent to an unknown span name. This field + # is required. + name: str = betterproto.string_field(5) + # Distinguishes between spans generated in a particular context. For example, + # two spans with the same name may be distinguished using `CLIENT` (caller) + # and `SERVER` (callee) to identify queueing latency associated with the + # span. + kind: "SpanSpanKind" = betterproto.enum_field(6) + # start_time_unix_nano is the start time of the span. On the client side, + # this is the time kept by the local machine where the span execution starts. + # On the server side, this is the time when the server's application handler + # starts running. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC + # on 1 January 1970. This field is semantically required and it is expected + # that end_time >= start_time. + start_time_unix_nano: float = betterproto.fixed64_field(7) + # end_time_unix_nano is the end time of the span. On the client side, this is + # the time kept by the local machine where the span execution ends. 
On the + # server side, this is the time when the server application handler stops + # running. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 + # January 1970. This field is semantically required and it is expected that + # end_time >= start_time. + end_time_unix_nano: float = betterproto.fixed64_field(8) + # attributes is a collection of key/value pairs. Note, global attributes like + # server name can be set using the resource API. Examples of attributes: + # "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) + # AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + # "/http/server_latency": 300 "example.com/myattribute": true + # "example.com/score": 10.239 The OpenTelemetry API specification further + # restricts the allowed value types: https://github.com/open- + # telemetry/opentelemetry- + # specification/blob/main/specification/common/README.md#attribute Attribute + # keys MUST be unique (it is not allowed to have more than one attribute with + # the same key). + attributes: List[common_pb2.KeyValue] = betterproto.message_field(9) + # dropped_attributes_count is the number of attributes that were discarded. + # Attributes can be discarded because their keys are too long or because + # there are too many attributes. If this value is 0, then no attributes were + # dropped. + dropped_attributes_count: int = betterproto.uint32_field(10) + # events is a collection of Event items. + events: List["SpanEvent"] = betterproto.message_field(11) + # dropped_events_count is the number of dropped events. If the value is 0, + # then no events were dropped. + dropped_events_count: int = betterproto.uint32_field(12) + # links is a collection of Links, which are references from this span to a + # span in the same or different trace. + links: List["SpanLink"] = betterproto.message_field(13) + # dropped_links_count is the number of dropped links after the maximum size + # was enforced. If this value is 0, then no links were dropped. + dropped_links_count: int = betterproto.uint32_field(14) + # An optional final status for this span. Semantically when Status isn't set, + # it means span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = + # 0). + status: "Status" = betterproto.message_field(15) + + +@dataclass +class SpanEvent(betterproto.Message): + """ + Event is a time-stamped annotation of the span, consisting of user-supplied + text description and key-value pairs. + """ + + # time_unix_nano is the time the event occurred. + time_unix_nano: float = betterproto.fixed64_field(1) + # name of the event. This field is semantically required to be set to non- + # empty string. + name: str = betterproto.string_field(2) + # attributes is a collection of attribute key/value pairs on the event. + # Attribute keys MUST be unique (it is not allowed to have more than one + # attribute with the same key). + attributes: List[common_pb2.KeyValue] = betterproto.message_field(3) + # dropped_attributes_count is the number of dropped attributes. If the value + # is 0, then no attributes were dropped. + dropped_attributes_count: int = betterproto.uint32_field(4) + + +@dataclass +class SpanLink(betterproto.Message): + """ + A pointer from the current span to another span in the same trace or in a + different trace. For example, this can be used in batching operations, + where a single batch handler processes multiple requests from different + traces or when the handler receives a request from a different project. 
+ """ + + # A unique identifier of a trace that this linked span is part of. The ID is + # a 16-byte array. + trace_id: bytes = betterproto.bytes_field(1) + # A unique identifier for the linked span. The ID is an 8-byte array. + span_id: bytes = betterproto.bytes_field(2) + # The trace_state associated with the link. + trace_state: str = betterproto.string_field(3) + # attributes is a collection of attribute key/value pairs on the link. + # Attribute keys MUST be unique (it is not allowed to have more than one + # attribute with the same key). + attributes: List[common_pb2.KeyValue] = betterproto.message_field(4) + # dropped_attributes_count is the number of dropped attributes. If the value + # is 0, then no attributes were dropped. + dropped_attributes_count: int = betterproto.uint32_field(5) + # Flags, a bit field. Bits 0-7 (8 least significant bits) are the trace flags + # as defined in W3C Trace Context specification. To read the 8-bit W3C trace + # flag, use `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. See + # https://www.w3.org/TR/trace-context-2/#trace-flags for the flag + # definitions. Bits 8 and 9 represent the 3 states of whether the link is + # remote. The states are (unknown, is not remote, is remote). To read whether + # the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != + # 0`. To read whether the link is remote, use `(flags & + # SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. Readers MUST NOT assume that bits + # 10-31 (22 most significant bits) will be zero. When creating new spans, + # bits 10-31 (most-significant 22-bits) MUST be zero. [Optional]. + flags: float = betterproto.fixed32_field(6) + + +@dataclass +class Status(betterproto.Message): + """ + The Status type defines a logical error model that is suitable for + different programming environments, including REST APIs and RPC APIs. + """ + + # A developer-facing human readable error message. + message: str = betterproto.string_field(2) + # The status code. + code: "StatusStatusCode" = betterproto.enum_field(3)
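Closing with a hedged end-to-end sketch (an editor's addition, not part of the diff): building a minimal span and round-tripping it through the binary serialization these dataclasses inherit from betterproto.Message; the identifiers and timings are invented for illustration.

import os
import time

from opentelemetry.proto.trace.v1.trace_pb2 import (
    ResourceSpans,
    ScopeSpans,
    Span,
    SpanSpanKind,
    Status,
    StatusStatusCode,
)

now = time.time_ns()
span = Span(
    trace_id=os.urandom(16),  # 16-byte, non-zero, as the comments above require
    span_id=os.urandom(8),
    name="GET /health",
    kind=SpanSpanKind.SPAN_KIND_SERVER,
    start_time_unix_nano=now - 5_000_000,
    end_time_unix_nano=now,
    status=Status(code=StatusStatusCode.STATUS_CODE_OK),
)
payload = ResourceSpans(scope_spans=[ScopeSpans(spans=[span])])

# betterproto messages serialize via bytes() and deserialize via .parse();
# the round trip should reproduce an equal message.
wire = bytes(payload)
assert ResourceSpans().parse(wire) == payload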