From 577f4f2d5589249ef71e50de217ffb86e52d60c1 Mon Sep 17 00:00:00 2001 From: Jonas Kunz Date: Thu, 15 Aug 2024 10:10:04 +0200 Subject: [PATCH 1/3] Replace inferred-spans extension with upstream contrib extension --- .../otel/common/ElasticAttributes.java | 10 +- inferred-spans/README.md | 199 +-- inferred-spans/build.gradle.kts | 10 +- .../otel/InferredSpansConfigMigration.java | 82 ++ .../co/elastic/otel/profiler/CallTree.java | 924 -------------- .../co/elastic/otel/profiler/ChildList.java | 63 - .../profiler/InferredSpansAutoConfig.java | 75 -- .../profiler/InferredSpansConfiguration.java | 117 -- .../otel/profiler/InferredSpansProcessor.java | 120 -- .../InferredSpansProcessorBuilder.java | 200 --- .../profiler/ProfilingActivationListener.java | 153 --- .../otel/profiler/SamplingProfiler.java | 1054 ---------------- .../otel/profiler/SpanAnchoredClock.java | 66 - .../co/elastic/otel/profiler/StackFrame.java | 103 -- .../elastic/otel/profiler/ThreadMatcher.java | 65 - .../elastic/otel/profiler/TraceContext.java | 200 --- .../profiler/asyncprofiler/BufferedFile.java | 442 ------- .../profiler/asyncprofiler/JfrParser.java | 499 -------- .../asyncprofiler/ResourceExtractionUtil.java | 169 --- .../profiler/collections/CollectionUtil.java | 81 -- .../otel/profiler/collections/Hashing.java | 146 --- .../profiler/collections/Int2IntHashMap.java | 876 ------------- .../collections/Int2ObjectHashMap.java | 808 ------------ .../profiler/collections/IntIntConsumer.java | 46 - .../collections/Long2LongHashMap.java | 879 ------------- .../collections/Long2ObjectHashMap.java | 807 ------------ .../profiler/collections/LongHashSet.java | 704 ----------- .../otel/profiler/collections/LongList.java | 154 --- .../collections/LongLongConsumer.java | 46 - .../profiler/collections/package-info.java | 28 - .../profiler/pooling/AbstractObjectPool.java | 74 -- .../otel/profiler/pooling/Allocator.java | 32 - .../otel/profiler/pooling/ObjectPool.java | 53 - .../pooling/QueueBasedObjectPool.java | 97 -- .../otel/profiler/pooling/Recyclable.java | 25 - .../otel/profiler/pooling/Resetter.java | 53 - .../elastic/otel/profiler/util/ByteUtils.java | 43 - .../otel/profiler/util/ThreadUtils.java | 52 - .../InferredSpansAutoConfigTest.java | 114 +- .../otel/profiler/CallTreeSpanifyTest.java | 274 ---- .../elastic/otel/profiler/CallTreeTest.java | 1097 ----------------- .../co/elastic/otel/profiler/FixedClock.java | 53 - .../otel/profiler/ProfilerTestSetup.java | 72 -- .../profiler/SamplingProfilerQueueTest.java | 68 - .../otel/profiler/SamplingProfilerReplay.java | 79 -- .../otel/profiler/SamplingProfilerTest.java | 403 ------ .../otel/profiler/ThreadMatcherTest.java | 52 - .../profiler/asyncprofiler/JfrParserTest.java | 66 - licenses/more-licences.md | 44 +- .../inferred-spans-test/build.gradle.kts | 8 +- 50 files changed, 154 insertions(+), 11731 deletions(-) create mode 100644 inferred-spans/src/main/java/co/elastic/otel/InferredSpansConfigMigration.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/CallTree.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/ChildList.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansAutoConfig.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansConfiguration.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansProcessor.java delete mode 100644 
inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansProcessorBuilder.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/ProfilingActivationListener.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/SamplingProfiler.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/SpanAnchoredClock.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/StackFrame.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/ThreadMatcher.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/TraceContext.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/BufferedFile.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/JfrParser.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/ResourceExtractionUtil.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/CollectionUtil.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Hashing.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Int2IntHashMap.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Int2ObjectHashMap.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/IntIntConsumer.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Long2LongHashMap.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Long2ObjectHashMap.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongHashSet.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongList.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongLongConsumer.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/collections/package-info.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/AbstractObjectPool.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Allocator.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/ObjectPool.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/QueueBasedObjectPool.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Recyclable.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Resetter.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/util/ByteUtils.java delete mode 100644 inferred-spans/src/main/java/co/elastic/otel/profiler/util/ThreadUtils.java rename inferred-spans/src/test/java/co/elastic/otel/{profiler => }/InferredSpansAutoConfigTest.java (50%) delete mode 100644 inferred-spans/src/test/java/co/elastic/otel/profiler/CallTreeSpanifyTest.java delete mode 100644 inferred-spans/src/test/java/co/elastic/otel/profiler/CallTreeTest.java delete mode 100644 inferred-spans/src/test/java/co/elastic/otel/profiler/FixedClock.java delete mode 100644 inferred-spans/src/test/java/co/elastic/otel/profiler/ProfilerTestSetup.java delete mode 100644 inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerQueueTest.java delete mode 100644 
inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerReplay.java delete mode 100644 inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerTest.java delete mode 100644 inferred-spans/src/test/java/co/elastic/otel/profiler/ThreadMatcherTest.java delete mode 100644 inferred-spans/src/test/java/co/elastic/otel/profiler/asyncprofiler/JfrParserTest.java diff --git a/common/src/main/java/co/elastic/otel/common/ElasticAttributes.java b/common/src/main/java/co/elastic/otel/common/ElasticAttributes.java index f0dc104e..5526d90e 100644 --- a/common/src/main/java/co/elastic/otel/common/ElasticAttributes.java +++ b/common/src/main/java/co/elastic/otel/common/ElasticAttributes.java @@ -30,11 +30,11 @@ public interface ElasticAttributes { AttributeKey SPAN_TYPE = AttributeKey.stringKey("elastic.span.type"); AttributeKey SPAN_SUBTYPE = AttributeKey.stringKey("elastic.span.subtype"); - /** Marker attribute for inferred spans. */ - AttributeKey IS_INFERRED = AttributeKey.booleanKey("elastic.is_inferred"); - - /** Used as marker on span-links to override the parent-child relationship for inferred spans. */ - AttributeKey IS_CHILD = AttributeKey.booleanKey("elastic.is_child"); + /** + * Marker attribute for inferred spans. Does not have the elastic-prefix anymore because it has + * been contributed upstream + */ + AttributeKey IS_INFERRED = AttributeKey.booleanKey("is_inferred"); AttributeKey> PROFILER_STACK_TRACE_IDS = AttributeKey.stringArrayKey("elastic.profiler_stack_trace_ids"); diff --git a/inferred-spans/README.md b/inferred-spans/README.md index b68d451a..e1b0f9fc 100644 --- a/inferred-spans/README.md +++ b/inferred-spans/README.md @@ -1,196 +1,3 @@ -OpenTelemetry extension for generating spans via profiling instead of instrumentation. -This extension enhances traces by running [async-profiler](https://github.com/async-profiler/async-profiler) in wall-clock profiling mode whenever there is an active sampled OpenTelemetry span. - -The resulting profiling data is afterwards analyzed and spans are "inferred". -This means there is a delay between the regular and the inferred spans being visible in the UI. - -Only platform threads are supported. Virtual threads are not supported and will not be profiled. - -## Usage - -This section describes the usage of this extension outside of an agent. -Add the following dependency to your project: - -``` - - co.elastic.otel - inferred-spans - {latest version} - -``` - -### Autoconfiguration - -This extension supports [autoconfiguration](https://github.com/open-telemetry/opentelemetry-java/tree/main/sdk-extensions/autoconfigure). 
- -So if you are using an autoconfigured OpenTelemetry SDK, you'll only need to add this extension to your class path and configure it via system properties or environment variables: - -| Property Name / Environment Variable Name | Default | Description | -|---------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| elastic.otel.inferred.spans.enabled
ELASTIC_OTEL_INFERRED_SPANS_ENABLED | `false` | Enables the inferred spans feature. | -| elastic.otel.inferred.spans.logging.enabled
ELASTIC_OTEL_INFERRED_SPANS_LOGGING_ENABLED | `true` | By default, async profiler prints warning messages about missing JVM symbols to standard output. Set this option to `false` to suppress such messages. | -| elastic.otel.inferred.spans.backup.diagnostic.files
ELASTIC_OTEL_INFERRED_SPANS_BACKUP_DIAGNOSTIC_FILES | `false` | Do not delete the temporary profiling files, can be used later to reproduce in case of issues. | -| elastic.otel.inferred.spans.safe.mode
ELASTIC_OTEL_INFERRED_SPANS_SAFE_MODE | `0` | Can be used for analysis: the Async Profiler's area that deals with recovering stack trace frames is known to be sensitive in some systems. It is used as a bit mask with values between 0 and 31, where 0 enables all recovery attempts and 31 disables all five (corresponding to 1, 2, 4, 8 and 16). | -| elastic.otel.inferred.spans.post.processing.enabled
ELASTIC_OTEL_INFERRED_SPANS_POST_PROCESSING_ENABLED | `true` | Can be used to test the effect of the async-profiler in isolation from the agent's post-processing. | -| elastic.otel.inferred.spans.sampling.interval
ELASTIC_OTEL_INFERRED_SPANS_SAMPLING_INTERVAL | `50ms` | The frequency at which stack traces are gathered within a profiling session. The lower you set it, the more accurate the durations will be. This comes at the expense of higher overhead and more spans for potentially irrelevant operations. The minimal duration of a profiling-inferred span is the same as the value of this setting. | -| elastic.otel.inferred.spans.min.duration
ELASTIC_OTEL_INFERRED_SPANS_MIN_DURATION | `0ms` | The minimum duration of an inferred span. Note that the min duration is also implicitly set by the sampling interval. However, increasing the sampling interval also decreases the accuracy of the duration of inferred spans. | -| elastic.otel.inferred.spans.included.classes
ELASTIC_OTEL_INFERRED_SPANS_INCLUDED_CLASSES | `*` | If set, the agent will only create inferred spans for methods which match this list. Setting a value may slightly reduce overhead and can reduce clutter by only creating spans for the classes you are interested in.
Example: `org.example.myapp.*` | -| elastic.otel.inferred.spans.excluded.classes
ELASTIC_OTEL_INFERRED_SPANS_EXCLUDED_CLASSES | `java.*`
`javax.*`
`sun.*`
`com.sun.*`
`jdk.*`
`org.apache.tomcat.*`
`org.apache.catalina.*`
`org.apache.coyote.*`
`org.jboss.as.*`
`org.glassfish.*`
`org.eclipse.jetty.*`
`com.ibm.websphere.*`
`io.undertow.*` | Excludes classes for which no profiler-inferred spans should be created. | -| elastic.otel.inferred.spans.interval
ELASTIC_OTEL_INFERRED_SPANS_INTERVAL | `5s` | The interval at which profiling sessions should be started. | -| elastic.otel.inferred.spans.duration
ELASTIC_OTEL_INFERRED_SPANS_DURATION | `5s` | The duration of a profiling session. For sampled transactions which fall within a profiling session (they start after and end before the session), so-called inferred spans will be created. They appear in the trace waterfall view like regular spans.
NOTE: It is not recommended to set much higher durations as it may fill the activation events file and async-profiler's frame buffer. Warnings will be logged if the activation events file is full. If you want to have more profiling coverage, try decreasing `profiling_inferred_spans_interval` | -| elastic.otel.inferred.spans.lib.directory
ELASTIC_OTEL_INFERRED_SPANS_LIB_DIRECTORY | Defaults to the value of `java.io.tmpdir` | Profiling requires that the https://github.com/jvm-profiling-tools/async-profiler[async-profiler] shared library is exported to a temporary location and loaded by the JVM. The partition backing this location must be executable, however in some server-hardened environments, `noexec` may be set on the standard `/tmp` partition, leading to `java.lang.UnsatisfiedLinkError` errors. Set this property to an alternative directory (e.g. `/var/tmp`) to resolve this. | - - - - -### Manual SDK setup - -If you manually set-up your `OpenTelemetrySDK`, you need to create and register an `InferredSpansProcessor` with your `TracerProvider`: - -```java -InferredSpansProcessor inferredSpans = InferredSpansProcessor.builder() - //.samplingInterval(Duration.ofMillis(10)) - .build(); -SdkTracerProvider tracerProvider = SdkTracerProvider.builder() - .addSpanProcessor(inferredSpans) - .addSpanProcessor(BatchSpanProcessor.builder(OtlpGrpcSpanExporter.builder() - .setEndpoint("https://.apm.europe-west3.gcp.cloud.es.io:443") - .addHeader("Authorization", "Bearer >") - .build())) - .build(); -inferredSpans.setTracerProvider(tracerProvider); -``` - -The `setTracerProvider(..)` call shown at the end may be omitted, in that case `GlobalOpenTelemetry` will be used for generating the inferred spans. - - -## Known issues - -### Missing inferred spans - -- After each profiling session, while the stack traces and activation events are processed, no traces are collected. - - Under load, processing can take seconds; ~200ms are normal. - - Log: - ``` - DEBUG Processing {} stack traces - ... - DEBUG Processing traces took {}µs - ``` -- While stack traces are processed, activation events are still put into the ring buffer. However, they don't get processed. If, during this period, there are more activation events than the buffer can handle, we're losing activation events. - - Log: `Could not add activation event to ring buffer as no slots are available` - - Lost activation events can lead to orpaned call trees (lost end event), missing roots (lost start event) and messed up parent/child relationships (lost span activations/deactivations) - Log: - ``` - DEBUG Illegal state ... - ``` -- Under load, the activation event ring buffer can also get full -- The actual `profiling_sampling_interval` might be a bit lower. async-profiler aims to keep the interval relatively consistent but if there are too many threads actively running transactions or if there's a traffic spike, the interval can be lower. -- As a result of the above, some transactions don't contain inferred spans, even if their duration is longer than `profiling_sampling_interval`. - Log: - ``` - DEBUG Created no spans for thread {} (count={}) - ``` -- If the sampling rate is high and there's lots of traffic, the amount of inferred spans may flood the internal queue, leading to lost events (transactions, regular spans, or inferred spans). 
- Log: - ``` - DEBUG Could not add {} {} to ring buffer as no slots are available - ``` -- The UI currently doesn't favor trace samples with inferred spans -- To find out about how many transactions with inferred spans there are - ``` - POST /apm*/_search - { - "size": 0, - "query": { - "term": { - "span.subtype": { - "value": "inferred" - } - } - }, - "aggs": { - "traces_with_inferred_spans": { - "cardinality": { - "field": "trace.id", - "precision_threshold": 100000 - } - } - } - } - ``` -- There can be a race condition when putting activation events into the queue which leads to older events being in front of newer ones, like `1, 2, 4, 3, 5`. But this is quite infrequent and the consequences are similar to losing that activation event or even without any consequence. - Log: - ``` - Timestamp of current activation event ({}) is lower than the one from the previous event ({}) - ``` -### Incorrect parent/child relationships - -#### Without workaround - -Inferred span starts after actual span, even though it should be the parent -``` - ---------[inferred ] - [actual] -^ ^ ^ -``` - - -Inferred span ends before actual span, even though it should be the parent -``` -[inferred ]------------ - [actual] -^ ^ ^ -``` - -``` - -------[inferred ]------- [actual ] - [actual ] -> -------[inferred ]------- -^ ^ ^ ^ -``` - -Two consecutive method invocations are interpreted as one longer execution -``` -[actual] [actual] -> [-------- --------] -^ ^ -``` - -#### With workaround - -These are some tricky situations we have managed to find a workaround for. - -##### Regular spans as a child of an inferred span - -This is tricky as regular spans are sent to APM Server right after the event has ended. -Inferred spans are sent later - after the profiling session ends. - -This is how the situation looks without a workaround: -``` -[transaction ] [transaction ] -└─[inferred ] -> ├─[inferred ] - └─[actual] └───[actual] -``` -There are situations where the ordering is off as a result of that. - -The workaround is that inferred spans have span-links with a special `elastic.is_child` attribute, -pointing to the regular spans they are the parent of. - -##### Parent inferred span ends before child -Workaround: set end timestamp of inferred span to end timestamp of actual span. -``` -[inferred ]-------- [inferred -----]-- - [actual] -> [actual] -^ ^ ^ -``` - -##### Parent inferred span starts after child -Workaround: set start timestamp of inferred span to start timestamp of actual span. -``` - --------[inferred ] --[------inferred ] - [actual ] -> [actual ] -^ ^ ^ -``` - -#### Example - -In this screenshot, we can see several problems at once -inferred spans issues +This extension is a thin wrapper around the [OpenTelemetry inferred-spans extension](https://github.com/open-telemetry/opentelemetry-java-contrib/tree/main/inferred-spans). +It preserves backwards compatibility for the deprecated inferred-spans configuration options with the `ELASTIC_`/`elastic.` prefix by mapping them to the corresponding `OTEL_`/`otel.` options. +Instead of using this dependency, you should replace your usages of the deprecated configuration options and use the upstream dependency directly.
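[Editor's note: a minimal sketch, not part of the patch, of what the option migration described in the new README text looks like from an application's point of view. It assumes an autoconfigured SDK; the `InferredSpansMigrationExample` class name and the chosen property values are illustrative only, while the property names come from the `CONFIG_MAPPING` table in the `InferredSpansConfigMigration` class added later in this patch.]

```java
import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk;

public class InferredSpansMigrationExample {

  public static void main(String[] args) {
    // Deprecated Elastic-prefixed option: still honored by this wrapper,
    // which logs a deprecation warning and maps it to the otel.* key.
    // System.setProperty("elastic.otel.inferred.spans.enabled", "true");

    // Preferred upstream option names (the targets of the mapping):
    System.setProperty("otel.inferred.spans.enabled", "true");
    System.setProperty("otel.inferred.spans.sampling.interval", "50ms");

    // The autoconfigured SDK picks these properties up, and the contrib
    // inferred-spans extension registers its span processor automatically.
    AutoConfiguredOpenTelemetrySdk.initialize();
  }
}
```

Setting the equivalent `OTEL_INFERRED_SPANS_*` environment variables should behave the same way, per the `OTEL_`/`otel.` mapping described above.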
diff --git a/inferred-spans/build.gradle.kts b/inferred-spans/build.gradle.kts index 4decd07d..24151c07 100644 --- a/inferred-spans/build.gradle.kts +++ b/inferred-spans/build.gradle.kts @@ -8,13 +8,11 @@ description = "Elastic Inferred Spans extension for OpenTelemetry Java" dependencies { annotationProcessor(libs.autoservice.processor) compileOnly(libs.autoservice.annotations) + // TODO: remove explicit version of dependency and have it managed by the BOM + // once contrib 1.37 is used by the upstream agent + implementation("io.opentelemetry.contrib:opentelemetry-inferred-spans:1.37.0-alpha") compileOnly("io.opentelemetry:opentelemetry-sdk") compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi") - compileOnly(libs.findbugs.jsr305) - implementation(libs.lmax.disruptor) - implementation(libs.jctools) - implementation(libs.asyncprofiler) - implementation(libs.bundles.semconv) implementation(project(":common")) testAnnotationProcessor(libs.autoservice.processor) @@ -24,8 +22,6 @@ dependencies { testImplementation("io.opentelemetry:opentelemetry-sdk") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") testImplementation(libs.awaitility) - testImplementation(libs.github.api) - testImplementation(libs.apachecommons.compress) testImplementation(libs.bundles.semconv) } diff --git a/inferred-spans/src/main/java/co/elastic/otel/InferredSpansConfigMigration.java b/inferred-spans/src/main/java/co/elastic/otel/InferredSpansConfigMigration.java new file mode 100644 index 00000000..ef4370db --- /dev/null +++ b/inferred-spans/src/main/java/co/elastic/otel/InferredSpansConfigMigration.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package co.elastic.otel; + +import com.google.auto.service.AutoService; +import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizer; +import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizerProvider; +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; + +@AutoService(AutoConfigurationCustomizerProvider.class) +public class InferredSpansConfigMigration implements AutoConfigurationCustomizerProvider { + + private static final Logger log = Logger.getLogger(InferredSpansConfigMigration.class.getName()); + + private static final Map CONFIG_MAPPING = new HashMap<>(); + + static { + CONFIG_MAPPING.put("elastic.otel.inferred.spans.enabled", "otel.inferred.spans.enabled"); + CONFIG_MAPPING.put( + "elastic.otel.inferred.spans.logging.enabled", "otel.inferred.spans.logging.enabled"); + CONFIG_MAPPING.put( + "elastic.otel.inferred.spans.backup.diagnostic.files", + "otel.inferred.spans.backup.diagnostic.files"); + CONFIG_MAPPING.put("elastic.otel.inferred.spans.safe.mode", "otel.inferred.spans.safe.mode"); + CONFIG_MAPPING.put( + "elastic.otel.inferred.spans.post.processing.enabled", + "otel.inferred.spans.post.processing.enabled"); + CONFIG_MAPPING.put( + "elastic.otel.inferred.spans.sampling.interval", "otel.inferred.spans.sampling.interval"); + CONFIG_MAPPING.put( + "elastic.otel.inferred.spans.min.duration", "otel.inferred.spans.min.duration"); + CONFIG_MAPPING.put( + "elastic.otel.inferred.spans.included.classes", "otel.inferred.spans.included.classes"); + CONFIG_MAPPING.put( + "elastic.otel.inferred.spans.excluded.classes", "otel.inferred.spans.excluded.classes"); + CONFIG_MAPPING.put("elastic.otel.inferred.spans.interval", "otel.inferred.spans.interval"); + CONFIG_MAPPING.put("elastic.otel.inferred.spans.duration", "otel.inferred.spans.duration"); + CONFIG_MAPPING.put( + "elastic.otel.inferred.spans.lib.directory", "otel.inferred.spans.lib.directory"); + } + + @Override + public void customize(AutoConfigurationCustomizer config) { + config.addPropertiesCustomizer( + props -> { + Map overrides = new HashMap<>(); + for (String oldKey : CONFIG_MAPPING.keySet()) { + String value = props.getString(oldKey); + if (value != null) { + String newKey = CONFIG_MAPPING.get(oldKey); + if (props.getString(newKey) == null) { // new value has not been configured + log.log( + Level.WARNING, + "The configuration property {0} is deprecated, use {1} instead", + new Object[] {oldKey, newKey}); + overrides.put(newKey, value); + } + } + } + return overrides; + }); + } +} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/CallTree.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/CallTree.java deleted file mode 100644 index c6603339..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/CallTree.java +++ /dev/null @@ -1,924 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import static java.util.logging.Level.FINE; -import static java.util.logging.Level.WARNING; - -import co.elastic.otel.common.ElasticAttributes; -import co.elastic.otel.common.util.HexUtils; -import co.elastic.otel.profiler.collections.LongHashSet; -import co.elastic.otel.profiler.pooling.ObjectPool; -import co.elastic.otel.profiler.pooling.Recyclable; -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.api.trace.SpanBuilder; -import io.opentelemetry.api.trace.SpanContext; -import io.opentelemetry.api.trace.Tracer; -import io.opentelemetry.context.Context; -import io.opentelemetry.semconv.incubating.CodeIncubatingAttributes; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.logging.Logger; -import javax.annotation.Nullable; - -/** - * Converts a sequence of stack traces into a tree structure of method calls. - * - *
- *             count
- *  b b     a      4
- * aaaa ──► ├─b    1
- *          └─b    1
- * 
- * - *

It also stores information about which span is the parent of a particular call tree node, - * based on which span has been active at that time. - * - *

This allows to infer spans from the call tree which have the correct parent/child - * relationships with the regular spans. - */ -@SuppressWarnings("javadoc") -public class CallTree implements Recyclable { - - private static final int INITIAL_CHILD_SIZE = 2; - - private static final Attributes CHILD_LINK_ATTRIBUTES = - Attributes.builder().put(ElasticAttributes.IS_CHILD, true).build(); - - @Nullable private CallTree parent; - protected int count; - private List children = new ArrayList<>(INITIAL_CHILD_SIZE); - @Nullable private StackFrame frame; - protected long start; - private long lastSeen; - private boolean ended; - private long activationTimestamp = -1; - - /** - * The context of the transaction or span which is the direct parent of this call tree node. Used - * in {@link #spanify} to override the parent. - */ - @Nullable private TraceContext activeContextOfDirectParent; - - private long deactivationTimestamp = -1; - private boolean isSpan; - private int depth; - - @Nullable private ChildList childIds; - - @Nullable private ChildList maybeChildIds; - - public CallTree() {} - - public void set(@Nullable CallTree parent, StackFrame frame, long nanoTime) { - this.parent = parent; - this.frame = frame; - this.start = nanoTime; - if (parent != null) { - this.depth = parent.depth + 1; - } - } - - public boolean isSuccessor(CallTree parent) { - if (depth > parent.depth) { - return getNthParent(depth - parent.depth) == parent; - } - return false; - } - - @Nullable - public CallTree getNthParent(int n) { - CallTree parent = this; - for (int i = 0; i < n; i++) { - if (parent != null) { - parent = parent.parent; - } else { - return null; - } - } - return parent; - } - - public void activation(TraceContext traceContext, long activationTimestamp) { - this.activeContextOfDirectParent = traceContext; - this.activationTimestamp = activationTimestamp; - } - - protected void handleDeactivation( - TraceContext deactivatedSpan, long activationTimestamp, long deactivationTimestamp) { - if (deactivatedSpan.idEquals(activeContextOfDirectParent)) { - this.deactivationTimestamp = deactivationTimestamp; - } else { - CallTree lastChild = getLastChild(); - if (lastChild != null) { - lastChild.handleDeactivation(deactivatedSpan, activationTimestamp, deactivationTimestamp); - } - } - // if an actual child span is deactivated after this call tree node has ended - // it means that this node has actually ended at least at the same point, if not after, the - // actual span has been deactivated - // - // [a(inferred)] ─► [a(inferred) ] ← set end timestamp to timestamp of deactivation of b - // └─[b(actual) ] └─[b(actual) ] - // see also CallTreeTest::testDectivationAfterEnd - if (happenedDuring(activationTimestamp) && happenedAfter(deactivationTimestamp)) { - lastSeen = deactivationTimestamp; - } - } - - private boolean happenedDuring(long timestamp) { - return start <= timestamp && timestamp <= lastSeen; - } - - private boolean happenedAfter(long timestamp) { - return lastSeen < timestamp; - } - - public static CallTree.Root createRoot( - ObjectPool rootPool, byte[] traceContext, long nanoTime) { - CallTree.Root root = rootPool.createInstance(); - root.set(traceContext, nanoTime); - return root; - } - - /** - * Adds a single stack trace to the call tree which either updates the {@link #lastSeen} timestamp - * of an existing call tree node, {@linkplain #end ends} a node, or {@linkplain #addChild adds a - * new child}. 
- * - * @param stackFrames the stack trace which is iterated over in reverse order - * @param index the current index of {@code stackFrames} - * @param activeSpan the trace context of the currently active span - * @param activationTimestamp the timestamp of when {@code traceContext} has been activated - * @param nanoTime the timestamp of when this stack trace has been recorded - */ - protected CallTree addFrame( - List stackFrames, - int index, - @Nullable TraceContext activeSpan, - long activationTimestamp, - long nanoTime, - ObjectPool callTreePool, - long minDurationNs, - Root root) { - count++; - lastSeen = nanoTime; - // c ee ← traceContext not set - they are not a child of the active span but the frame - // below them - // bbb dd ← traceContext set - // ------ ← all new CallTree during this period should have the traceContext set - // a aaaaaa a - // | | - // active deactive - - // this branch is already aware of the activation - // this means the provided activeSpan is not a direct parent of new child nodes - if (activeSpan != null - && this.activeContextOfDirectParent != null - && this.activeContextOfDirectParent.idEquals(activeSpan)) { - activeSpan = null; - } - - // non-last children are already ended by definition - CallTree lastChild = getLastChild(); - // if the frame corresponding to the last child is not in the stack trace - // it's assumed to have ended one tick ago - CallTree topOfStack = this; - boolean endChild = true; - if (index >= 1) { - final StackFrame frame = stackFrames.get(--index); - if (lastChild != null) { - if (!lastChild.isEnded() && frame.equals(lastChild.frame)) { - topOfStack = - lastChild.addFrame( - stackFrames, - index, - activeSpan, - activationTimestamp, - nanoTime, - callTreePool, - minDurationNs, - root); - endChild = false; - } else { - topOfStack = - addChild( - frame, - stackFrames, - index, - activeSpan, - activationTimestamp, - nanoTime, - callTreePool, - minDurationNs, - root); - } - } else { - topOfStack = - addChild( - frame, - stackFrames, - index, - activeSpan, - activationTimestamp, - nanoTime, - callTreePool, - minDurationNs, - root); - } - } - if (lastChild != null && !lastChild.isEnded() && endChild) { - lastChild.end(callTreePool, minDurationNs, root); - } - transferMaybeChildIdsToChildIds(); - return topOfStack; - } - - /** - * This method is called when we know for sure that the maybe child ids are actually belonging to - * this call tree. This is the case after we've seen another frame represented by this call tree. 
- * - * @see #addMaybeChildId(long) - */ - private void transferMaybeChildIdsToChildIds() { - if (maybeChildIds != null) { - if (childIds == null) { - childIds = maybeChildIds; - maybeChildIds = null; - } else { - childIds.addAll(maybeChildIds); - maybeChildIds.clear(); - } - } - } - - private CallTree addChild( - StackFrame frame, - List stackFrames, - int index, - @Nullable TraceContext traceContext, - long activationTimestamp, - long nanoTime, - ObjectPool callTreePool, - long minDurationNs, - Root root) { - CallTree callTree = callTreePool.createInstance(); - callTree.set(this, frame, nanoTime); - if (traceContext != null) { - callTree.activation(traceContext, activationTimestamp); - } - children.add(callTree); - return callTree.addFrame( - stackFrames, index, null, activationTimestamp, nanoTime, callTreePool, minDurationNs, root); - } - - long getDurationUs() { - return getDurationNs() / 1000; - } - - private long getDurationNs() { - return lastSeen - start; - } - - public int getCount() { - return count; - } - - @Nullable - public StackFrame getFrame() { - return frame; - } - - public List getChildren() { - return children; - } - - protected void end(ObjectPool pool, long minDurationNs, Root root) { - ended = true; - // if the parent span has already been deactivated before this call tree node has ended - // it means that this node is actually the parent of the already deactivated span - // make b parent of a and pre-date the start of b to the activation of a - // [a(inferred) ] [a(inferred) ] - // [1 ] ──┐ [b(inferred) ] - // └[b(inferred)] │ [c(inferred)] - // [c(infer.) ] └► [1 ] - // └─[d(i.)] └──[d(i.)] - // see also CallTreeTest::testDeactivationBeforeEnd - if (deactivationHappenedBeforeEnd()) { - start = Math.min(activationTimestamp, start); - if (parent != null) { - // we know there's always exactly one activation in the parent's childIds - // that needs to be transferred to this call tree node - // in the above example, 1's child id would be first transferred from a to b and then from b - // to c - // this ensures that the UI knows that c is the parent of 1 - parent.giveLastChildIdTo(this); - } - - List callTrees = getChildren(); - for (int i = 0, size = callTrees.size(); i < size; i++) { - CallTree child = callTrees.get(i); - child.activation(activeContextOfDirectParent, activationTimestamp); - child.deactivationTimestamp = deactivationTimestamp; - // re-run this logic for all children, even if they have already ended - child.end(pool, minDurationNs, root); - } - activeContextOfDirectParent = null; - activationTimestamp = -1; - deactivationTimestamp = -1; - } - if (parent != null && isTooFast(minDurationNs)) { - root.previousTopOfStack = parent; - parent.removeChild(pool, this); - } else { - CallTree lastChild = getLastChild(); - if (lastChild != null && !lastChild.isEnded()) { - lastChild.end(pool, minDurationNs, root); - } - } - } - - private boolean isTooFast(long minDurationNs) { - return count == 1 || isFasterThan(minDurationNs); - } - - private void removeChild(ObjectPool pool, CallTree child) { - children.remove(child); - child.recursiveGiveChildIdsTo(this); - child.recycle(pool); - } - - private boolean isFasterThan(long minDurationNs) { - return getDurationNs() < minDurationNs; - } - - private boolean deactivationHappenedBeforeEnd() { - return activeContextOfDirectParent != null - && deactivationTimestamp > -1 - && lastSeen > deactivationTimestamp; - } - - public boolean isLeaf() { - return children.isEmpty(); - } - - /** - * Returns {@code true} if this node has 
just one child and no self time. - * - *

-   *  c
-   *  b  ← b is a pillar
-   * aaa
-   * 
- */ - private boolean isPillar() { - return children.size() == 1 && children.get(0).count == count; - } - - @Nullable - public CallTree getLastChild() { - return children.size() > 0 ? children.get(children.size() - 1) : null; - } - - public boolean isEnded() { - return ended; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - try { - toString(sb); - } catch (IOException e) { - throw new RuntimeException(e); - } - return sb.toString(); - } - - private void toString(Appendable out) throws IOException { - toString(out, 0); - } - - private void toString(Appendable out, int level) throws IOException { - for (int i = 0; i < level; i++) { - out.append(" "); - } - out.append(frame != null ? frame.getClassName() : "null") - .append('.') - .append(frame != null ? frame.getMethodName() : "null") - .append(' ') - .append(Integer.toString(count)) - .append('\n'); - for (CallTree node : children) { - node.toString(out, level + 1); - } - } - - int spanify( - CallTree.Root root, - @Nullable Span parentSpan, - TraceContext parentContext, - SpanAnchoredClock clock, - StringBuilder tempBuilder, - Tracer tracer) { - int createdSpans = 0; - if (activeContextOfDirectParent != null) { - parentSpan = null; - parentContext = activeContextOfDirectParent; - } - Span span = null; - if (!isPillar() || isLeaf()) { - createdSpans++; - span = asSpan(root, parentSpan, parentContext, tracer, clock, tempBuilder); - this.isSpan = true; - } - List children = getChildren(); - for (int i = 0, size = children.size(); i < size; i++) { - createdSpans += - children - .get(i) - .spanify( - root, - span != null ? span : parentSpan, - parentContext, - clock, - tempBuilder, - tracer); - } - return createdSpans; - } - - protected Span asSpan( - Root root, - @Nullable Span parentSpan, - TraceContext parentContext, - Tracer tracer, - SpanAnchoredClock clock, - StringBuilder tempBuilder) { - - Context parentOtelCtx; - if (parentSpan != null) { - parentOtelCtx = Context.root().with(parentSpan); - } else { - tempBuilder.setLength(0); - parentOtelCtx = Context.root().with(Span.wrap(parentContext.toOtelSpanContext(tempBuilder))); - } - - tempBuilder.setLength(0); - String classFqn = frame.getClassName(); - if (classFqn != null) { - tempBuilder.append(classFqn, frame.getSimpleClassNameOffset(), classFqn.length()); - } else { - tempBuilder.append("null"); - } - tempBuilder.append("#"); - tempBuilder.append(frame.getMethodName()); - - transferMaybeChildIdsToChildIds(); - - SpanBuilder spanBuilder = - tracer - .spanBuilder(tempBuilder.toString()) - .setParent(parentOtelCtx) - .setAttribute(ElasticAttributes.IS_INFERRED, true) - .setStartTimestamp( - clock.toEpochNanos(parentContext.getClockAnchor(), this.start), - TimeUnit.NANOSECONDS); - insertChildIdLinks( - spanBuilder, Span.fromContext(parentOtelCtx).getSpanContext(), parentContext, tempBuilder); - - // we're not interested in the very bottom of the stack which contains things like accepting and - // handling connections - if (parentSpan != null || !root.rootContext.idEquals(parentContext)) { - // we're never spanifying the root - assert this.parent != null; - tempBuilder.setLength(0); - this.parent.fillStackTrace(tempBuilder); - spanBuilder.setAttribute(CodeIncubatingAttributes.CODE_STACKTRACE, tempBuilder.toString()); - } - - Span span = spanBuilder.startSpan(); - span.end( - clock.toEpochNanos(parentContext.getClockAnchor(), this.start + getDurationNs()), - TimeUnit.NANOSECONDS); - return span; - } - - private void insertChildIdLinks( - SpanBuilder 
span, - SpanContext parentContext, - TraceContext nonInferredParent, - StringBuilder tempBuilder) { - if (childIds == null || childIds.isEmpty()) { - return; - } - for (int i = 0; i < childIds.getSize(); i++) { - // to avoid cycles, we only insert child-ids if the parent of the child is also - // the parent of the stack of inferred spans inserted - if (nonInferredParent.getSpanId() == childIds.getParentId(i)) { - tempBuilder.setLength(0); - HexUtils.appendLongAsHex(childIds.getId(i), tempBuilder); - SpanContext spanContext = - SpanContext.create( - parentContext.getTraceId(), - tempBuilder.toString(), - parentContext.getTraceFlags(), - parentContext.getTraceState()); - span.addLink(spanContext, CHILD_LINK_ATTRIBUTES); - } - } - } - - /** Fill in the stack trace up to the parent span */ - private void fillStackTrace(StringBuilder resultBuilder) { - if (parent != null && !this.isSpan) { - if (resultBuilder.length() > 0) { - resultBuilder.append('\n'); - } - resultBuilder - .append("at ") - .append(frame.getClassName()) - .append('.') - .append(frame.getMethodName()) - .append('('); - frame.appendFileName(resultBuilder); - resultBuilder.append(')'); - parent.fillStackTrace(resultBuilder); - } - } - - /** - * Recycles this subtree to the provided pool recursively. Note that this method ends by recycling - * {@code this} node (i.e. - this subtree root), which means that the caller of this method - * should make sure that no reference to this object is held anywhere. - * - *

ALSO NOTE: MAKE SURE NOT TO CALL THIS METHOD FOR {@link CallTree.Root} INSTANCES. - * - * @param pool the pool to which all subtree nodes are to be recycled - */ - public final void recycle(ObjectPool pool) { - assert !(this instanceof Root); - List children = this.children; - for (int i = 0, size = children.size(); i < size; i++) { - children.get(i).recycle(pool); - } - pool.recycle(this); - } - - @Override - public void resetState() { - parent = null; - count = 0; - frame = null; - start = 0; - lastSeen = 0; - ended = false; - activationTimestamp = -1; - activeContextOfDirectParent = null; - deactivationTimestamp = -1; - isSpan = false; - childIds = null; - maybeChildIds = null; - depth = 0; - if (children.size() > INITIAL_CHILD_SIZE) { - // the overwhelming majority of call tree nodes has either one or two children - // don't let outliers grow all lists in the pool over time - children = new ArrayList<>(INITIAL_CHILD_SIZE); - } else { - children.clear(); - } - } - - /** - * When a regular span is activated, we want it's {@code span.id} to be added to the call tree - * that represents the {@linkplain CallTree.Root#topOfStack top of the stack} to ensure correct - * parent/child relationships via re-parenting. - * - *

However, the {@linkplain CallTree.Root#topOfStack current top of the stack} may turn out to - * not be the right target. Consider this example: - * - *

-   * bb
-   * aa aa
-   *   1  1  ← activation
-   * 
- * - *

We would add the id of span {@code 1} to {@code b}'s {@link #maybeChildIds}. But after - * seeing the next frame, we realize the {@code b} has already ended and that we should {@link - * #giveMaybeChildIdsTo} from {@code b} and give it to {@code a}. This logic is implemented in - * {@link CallTree.Root#addStackTrace}. After seeing another frame of {@code a}, we know that - * {@code 1} is really the child of {@code a}, so we {@link #transferMaybeChildIdsToChildIds()}. - * - * @param id the child span id to add to this call tree element - */ - public void addMaybeChildId(long id, long parentId) { - if (maybeChildIds == null) { - maybeChildIds = new ChildList(); - } - maybeChildIds.add(id, parentId); - } - - public void addChildId(long id, long parentId) { - if (childIds == null) { - childIds = new ChildList(); - } - childIds.add(id, parentId); - } - - public boolean hasChildIds() { - return (maybeChildIds != null && maybeChildIds.getSize() > 0) - || (childIds != null && childIds.getSize() > 0); - } - - public void recursiveGiveChildIdsTo(CallTree giveTo) { - for (int i = 0, childrenSize = children.size(); i < childrenSize; i++) { - children.get(i).recursiveGiveChildIdsTo(giveTo); - } - giveChildIdsTo(giveTo); - giveMaybeChildIdsTo(giveTo); - } - - void giveChildIdsTo(CallTree giveTo) { - if (this.childIds == null) { - return; - } - if (giveTo.childIds == null) { - giveTo.childIds = this.childIds; - } else { - giveTo.childIds.addAll(this.childIds); - } - this.childIds = null; - } - - void giveLastChildIdTo(CallTree giveTo) { - if (childIds != null && !childIds.isEmpty()) { - int size = childIds.getSize(); - long id = childIds.getId(size - 1); - long parentId = childIds.getParentId(size - 1); - giveTo.addChildId(id, parentId); - childIds.removeLast(); - } - } - - void giveMaybeChildIdsTo(CallTree giveTo) { - if (this.maybeChildIds == null) { - return; - } - if (giveTo.maybeChildIds == null) { - giveTo.maybeChildIds = this.maybeChildIds; - } else { - giveTo.maybeChildIds.addAll(this.maybeChildIds); - } - this.maybeChildIds = null; - } - - public int getDepth() { - return depth; - } - - /** - * A special kind of a {@link CallTree} node which represents the root of the call tree. This acts - * as the interface to the outside to add new nodes to the tree or to update existing ones by - * {@linkplain #addStackTrace adding stack traces}. - */ - public static class Root extends CallTree implements Recyclable { - private static final Logger logger = Logger.getLogger(Root.class.getName()); - private static final StackFrame ROOT_FRAME = new StackFrame("root", "root"); - - /** - * The context of the thread root, mostly a transaction or a span which got activated in an - * auxiliary thread - */ - protected TraceContext rootContext; - - /** - * The context of the transaction or span which is currently active. This is lazily deserialized - * from {@link #activeSpanSerialized} if there's an actual {@linkplain #addStackTrace stack - * trace} for this activation. - */ - @Nullable private TraceContext activeSpan; - - /** The timestamp of when {@link #activeSpan} got activated */ - private long activationTimestamp = -1; - - /** - * The context of the transaction or span which is currently active, in its {@linkplain - * TraceContext#serialize serialized} form. 
- */ - private byte[] activeSpanSerialized = new byte[TraceContext.SERIALIZED_LENGTH]; - - @Nullable private CallTree previousTopOfStack; - @Nullable private CallTree topOfStack; - - private final LongHashSet activeSet = new LongHashSet(); - - public Root() { - this.rootContext = new TraceContext(); - } - - private void set(byte[] traceContext, long nanoTime) { - super.set(null, ROOT_FRAME, nanoTime); - this.rootContext.deserialize(traceContext); - setActiveSpan(traceContext, nanoTime); - } - - public void setActiveSpan(byte[] activeSpanSerialized, long timestamp) { - activationTimestamp = timestamp; - System.arraycopy( - activeSpanSerialized, 0, this.activeSpanSerialized, 0, activeSpanSerialized.length); - this.activeSpan = null; - } - - public void onActivation(byte[] active, long timestamp) { - setActiveSpan(active, timestamp); - if (topOfStack != null) { - long spanId = TraceContext.getSpanId(active); - activeSet.add(spanId); - if (!isNestedActivation(topOfStack)) { - topOfStack.addMaybeChildId(spanId, TraceContext.getParentId(active)); - } - } - } - - private boolean isNestedActivation(CallTree topOfStack) { - return isAnyActive(topOfStack.childIds) || isAnyActive(topOfStack.maybeChildIds); - } - - private boolean isAnyActive(@Nullable ChildList spanIds) { - if (spanIds == null) { - return false; - } - for (int i = 0, size = spanIds.getSize(); i < size; i++) { - if (activeSet.contains(spanIds.getId(i))) { - return true; - } - } - return false; - } - - public void onDeactivation(byte[] deactivated, byte[] active, long timestamp) { - if (logger.isLoggable(FINE) && !Arrays.equals(activeSpanSerialized, deactivated)) { - logger.log(WARNING, "Illegal state: deactivating span that is not active"); - } - if (activeSpan != null) { - handleDeactivation(activeSpan, activationTimestamp, timestamp); - } - // else: activeSpan has not been materialized because no stack traces were added during this - // activation - setActiveSpan(active, timestamp); - // we're not interested in tracking nested activations that happen before we see the first - // stack trace - // that's because isNestedActivation is only called if topOfStack != null - // this optimizes for the case where we have no stack traces for a fast executing transaction - if (topOfStack != null) { - long spanId = TraceContext.getSpanId(deactivated); - activeSet.remove(spanId); - } - } - - public void addStackTrace( - List stackTrace, - long nanoTime, - ObjectPool callTreePool, - long minDurationNs) { - // only "materialize" trace context if there's actually an associated stack trace to the - // activation - // avoids allocating a TraceContext for very short activations which have no effect on the - // CallTree anyway - boolean firstFrameAfterActivation = false; - if (activeSpan == null) { - firstFrameAfterActivation = true; - activeSpan = new TraceContext(); - activeSpan.deserialize(activeSpanSerialized); - } - previousTopOfStack = topOfStack; - topOfStack = - addFrame( - stackTrace, - stackTrace.size(), - activeSpan, - activationTimestamp, - nanoTime, - callTreePool, - minDurationNs, - this); - - // After adding the first frame after an activation, we can check if we added the child ids to - // the correct CallTree - // If the new top of stack is not a successor (a different branch vs just added nodes on the - // same branch) - // we have to transfer the child ids of not yet deactivated spans to the new top of the stack. - // See also CallTreeTest.testActivationAfterMethodEnds and following tests. 
- if (firstFrameAfterActivation - && previousTopOfStack != topOfStack - && previousTopOfStack != null - && previousTopOfStack.hasChildIds()) { - if (!topOfStack.isSuccessor(previousTopOfStack)) { - CallTree commonAncestor = findCommonAncestor(previousTopOfStack, topOfStack); - CallTree newParent = commonAncestor != null ? commonAncestor : topOfStack; - if (newParent.count > 1) { - previousTopOfStack.giveMaybeChildIdsTo(newParent); - } else if (previousTopOfStack.maybeChildIds != null) { - previousTopOfStack.maybeChildIds.clear(); - } - } - } - } - - @Nullable - private CallTree findCommonAncestor(CallTree previousTopOfStack, CallTree topOfStack) { - int maxDepthOfCommonAncestor = Math.min(previousTopOfStack.getDepth(), topOfStack.getDepth()); - CallTree commonAncestor = null; - // i = 1 avoids considering the CallTree.Root node which is always the same - for (int i = 1; i <= maxDepthOfCommonAncestor; i++) { - CallTree ancestor1 = previousTopOfStack.getNthParent(previousTopOfStack.getDepth() - i); - CallTree ancestor2 = topOfStack.getNthParent(topOfStack.getDepth() - i); - if (ancestor1 == ancestor2) { - commonAncestor = ancestor1; - } else { - break; - } - } - return commonAncestor; - } - - /** - * Creates spans for call tree nodes if they are either not a {@linkplain #isPillar() pillar} or - * are a {@linkplain #isLeaf() leaf}. Nodes which are not converted to {@link Span}s are part of - * the span stackframes for the nodes which do get converted to a span. - * - *

Parent/child relationships with the regular spans are maintained. One exception is that an - * inferred span can't be the parent of a regular span. That is because the regular spans have - * already been reported once the inferred spans are created. In the future, we might make it - * possible to update the parent ID of a regular span so that it correctly reflects being a - * child of an inferred span. - */ - public int spanify(SpanAnchoredClock clock, Tracer tracer) { - StringBuilder tempBuilder = new StringBuilder(); - int createdSpans = 0; - List callTrees = getChildren(); - for (int i = 0, size = callTrees.size(); i < size; i++) { - createdSpans += - callTrees.get(i).spanify(this, null, rootContext, clock, tempBuilder, tracer); - } - return createdSpans; - } - - public TraceContext getRootContext() { - return rootContext; - } - - /** - * Recycles this tree to the provided pools. First, all child subtrees are recycled recursively - * to the children pool. Then, {@code this} root node is recycled to the root pool. This means - * that the caller of this method should make sure that no reference to this root object is - * held anywhere. - * - * @param childrenPool object pool for all non-root nodes - * @param rootPool object pool for root nodes - */ - public void recycle(ObjectPool childrenPool, ObjectPool rootPool) { - List children = getChildren(); - for (int i = 0, size = children.size(); i < size; i++) { - children.get(i).recycle(childrenPool); - } - rootPool.recycle(this); - } - - public void end(ObjectPool pool, long minDurationNs) { - end(pool, minDurationNs, this); - } - - @Override - public void resetState() { - super.resetState(); - rootContext.resetState(); - activeSpan = null; - activationTimestamp = -1; - Arrays.fill(activeSpanSerialized, (byte) 0); - previousTopOfStack = null; - topOfStack = null; - activeSet.clear(); - } - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/ChildList.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/ChildList.java deleted file mode 100644 index 7b7a62db..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/ChildList.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import co.elastic.otel.profiler.collections.LongList; - -/** List for maintaining pairs of (spanId,parentIds) both represented as longs. 
*/ -public class ChildList { - - // this list contains the (spanId,parentIds) flattened - private LongList idsWithParentIds = new LongList(); - - public void add(long id, long parentId) { - idsWithParentIds.add(id); - idsWithParentIds.add(parentId); - } - - public long getId(int index) { - return idsWithParentIds.get(index * 2); - } - - public long getParentId(int index) { - return idsWithParentIds.get(index * 2 + 1); - } - - public int getSize() { - return idsWithParentIds.getSize() / 2; - } - - public void addAll(ChildList other) { - idsWithParentIds.addAll(other.idsWithParentIds); - } - - public void clear() { - idsWithParentIds.clear(); - } - - public boolean isEmpty() { - return getSize() == 0; - } - - public void removeLast() { - int size = idsWithParentIds.getSize(); - idsWithParentIds.remove(size - 1); - idsWithParentIds.remove(size - 2); - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansAutoConfig.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansAutoConfig.java deleted file mode 100644 index fa821f7d..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansAutoConfig.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package co.elastic.otel.profiler; - -import co.elastic.otel.common.config.PropertiesApplier; -import com.google.auto.service.AutoService; -import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizer; -import io.opentelemetry.sdk.autoconfigure.spi.AutoConfigurationCustomizerProvider; -import java.util.logging.Logger; - -@AutoService(AutoConfigurationCustomizerProvider.class) -public class InferredSpansAutoConfig implements AutoConfigurationCustomizerProvider { - - private static final Logger log = Logger.getLogger(InferredSpansAutoConfig.class.getName()); - - static final String ENABLED_OPTION = "elastic.otel.inferred.spans.enabled"; - static final String LOGGING_OPTION = "elastic.otel.inferred.spans.logging.enabled"; - static final String DIAGNOSTIC_FILES_OPTION = - "elastic.otel.inferred.spans.backup.diagnostic.files"; - static final String SAFEMODE_OPTION = "elastic.otel.inferred.spans.safe.mode"; - static final String POSTPROCESSING_OPTION = "elastic.otel.inferred.spans.post.processing.enabled"; - static final String SAMPLING_INTERVAL_OPTION = "elastic.otel.inferred.spans.sampling.interval"; - static final String MIN_DURATION_OPTION = "elastic.otel.inferred.spans.min.duration"; - static final String INCLUDED_CLASSES_OPTION = "elastic.otel.inferred.spans.included.classes"; - static final String EXCLUDED_CLASSES_OPTION = "elastic.otel.inferred.spans.excluded.classes"; - static final String INTERVAL_OPTION = "elastic.otel.inferred.spans.interval"; - static final String DURATION_OPTION = "elastic.otel.inferred.spans.duration"; - static final String LIB_DIRECTORY_OPTION = "elastic.otel.inferred.spans.lib.directory"; - - @Override - public void customize(AutoConfigurationCustomizer config) { - config.addTracerProviderCustomizer( - (providerBuilder, properties) -> { - if (properties.getBoolean(ENABLED_OPTION, false)) { - InferredSpansProcessorBuilder builder = InferredSpansProcessor.builder(); - - PropertiesApplier applier = new PropertiesApplier(properties); - - applier.applyBool(LOGGING_OPTION, builder::profilerLoggingEnabled); - applier.applyBool(DIAGNOSTIC_FILES_OPTION, builder::backupDiagnosticFiles); - applier.applyInt(SAFEMODE_OPTION, builder::asyncProfilerSafeMode); - applier.applyBool(POSTPROCESSING_OPTION, builder::postProcessingEnabled); - applier.applyDuration(SAMPLING_INTERVAL_OPTION, builder::samplingInterval); - applier.applyDuration(MIN_DURATION_OPTION, builder::inferredSpansMinDuration); - applier.applyWildcards(INCLUDED_CLASSES_OPTION, builder::includedClasses); - applier.applyWildcards(EXCLUDED_CLASSES_OPTION, builder::excludedClasses); - applier.applyDuration(INTERVAL_OPTION, builder::profilerInterval); - applier.applyDuration(DURATION_OPTION, builder::profilingDuration); - applier.applyString(LIB_DIRECTORY_OPTION, builder::profilerLibDirectory); - - providerBuilder.addSpanProcessor(builder.build()); - } else { - log.finest( - "Not enabling inferred spans processor because " + ENABLED_OPTION + " is not set"); - } - return providerBuilder; - }); - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansConfiguration.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansConfiguration.java deleted file mode 100644 index a7775d8e..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansConfiguration.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. 
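Because the removed InferredSpansAutoConfig above plugs into SDK autoconfiguration, the elastic.otel.inferred.spans.* keys it reads are ordinary autoconfiguration properties. A hedged sketch of how the removed extension was switched on programmatically; the property values are examples only:

    import io.opentelemetry.sdk.OpenTelemetrySdk;
    import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk;

    public class EnableInferredSpansExample {
      public static OpenTelemetrySdk create() {
        // The processor is only registered when the enabled flag is true (see customize() above).
        System.setProperty("elastic.otel.inferred.spans.enabled", "true");
        // Optional tuning, applied to the builder shown further below via PropertiesApplier.
        System.setProperty("elastic.otel.inferred.spans.sampling.interval", "20ms");
        System.setProperty("elastic.otel.inferred.spans.included.classes", "org.example.myapp.*");
        return AutoConfiguredOpenTelemetrySdk.initialize().getOpenTelemetrySdk();
      }
    }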
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import co.elastic.otel.common.config.WildcardMatcher; -import java.time.Duration; -import java.util.List; - -public class InferredSpansConfiguration { - - private final boolean profilerLoggingEnabled; - private final boolean backupDiagnosticFiles; - private final int asyncProfilerSafeMode; - private final boolean postProcessingEnabled; - private final Duration samplingInterval; - private final Duration inferredSpansMinDuration; - private final List includedClasses; - private final List excludedClasses; - private final Duration profilerInterval; - private final Duration profilingDuration; - private final String profilerLibDirectory; - - InferredSpansConfiguration( - boolean profilerLoggingEnabled, - boolean backupDiagnosticFiles, - int asyncProfilerSafeMode, - boolean postProcessingEnabled, - Duration samplingInterval, - Duration inferredSpansMinDuration, - List includedClasses, - List excludedClasses, - Duration profilerInterval, - Duration profilingDuration, - String profilerLibDirectory) { - this.profilerLoggingEnabled = profilerLoggingEnabled; - this.backupDiagnosticFiles = backupDiagnosticFiles; - this.asyncProfilerSafeMode = asyncProfilerSafeMode; - this.postProcessingEnabled = postProcessingEnabled; - this.samplingInterval = samplingInterval; - this.inferredSpansMinDuration = inferredSpansMinDuration; - this.includedClasses = includedClasses; - this.excludedClasses = excludedClasses; - this.profilerInterval = profilerInterval; - this.profilingDuration = profilingDuration; - this.profilerLibDirectory = profilerLibDirectory; - } - - public static InferredSpansProcessorBuilder builder() { - return new InferredSpansProcessorBuilder(); - } - - public boolean isProfilingLoggingEnabled() { - return profilerLoggingEnabled; - } - - public int getAsyncProfilerSafeMode() { - return asyncProfilerSafeMode; - } - - public Duration getSamplingInterval() { - return samplingInterval; - } - - public Duration getInferredSpansMinDuration() { - return inferredSpansMinDuration; - } - - public List getIncludedClasses() { - return includedClasses; - } - - public List getExcludedClasses() { - return excludedClasses; - } - - public Duration getProfilingInterval() { - return profilerInterval; - } - - public Duration getProfilingDuration() { - return profilingDuration; - } - - public boolean isNonStopProfiling() { - return getProfilingDuration().toMillis() >= getProfilingInterval().toMillis(); - } - - public boolean isBackupDiagnosticFiles() { - return backupDiagnosticFiles; - } - - public String getProfilerLibDirectory() { - return profilerLibDirectory == null || profilerLibDirectory.isEmpty() - ? 
System.getProperty("java.io.tmpdir") - : profilerLibDirectory; - } - - public boolean isPostProcessingEnabled() { - return postProcessingEnabled; - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansProcessor.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansProcessor.java deleted file mode 100644 index 6034a5b5..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansProcessor.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import co.elastic.otel.InferredSpansVersion; -import co.elastic.otel.common.util.ExecutorUtils; -import io.opentelemetry.api.GlobalOpenTelemetry; -import io.opentelemetry.api.trace.Tracer; -import io.opentelemetry.api.trace.TracerProvider; -import io.opentelemetry.context.Context; -import io.opentelemetry.sdk.common.CompletableResultCode; -import io.opentelemetry.sdk.trace.ReadWriteSpan; -import io.opentelemetry.sdk.trace.ReadableSpan; -import io.opentelemetry.sdk.trace.SpanProcessor; -import java.io.File; -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadFactory; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.annotation.Nullable; - -public class InferredSpansProcessor implements SpanProcessor { - - private static final Logger logger = Logger.getLogger(InferredSpansProcessor.class.getName()); - - public static final String TRACER_NAME = "elastic-inferred-spans"; - - // Visible for testing - final SamplingProfiler profiler; - - private Tracer tracer; - - InferredSpansProcessor( - InferredSpansConfiguration config, - SpanAnchoredClock clock, - boolean startScheduledProfiling, - @Nullable File activationEventsFile, - @Nullable File jfrFile) { - profiler = new SamplingProfiler(config, clock, this::getTracer, activationEventsFile, jfrFile); - if (startScheduledProfiling) { - profiler.start(); - } - } - - public static InferredSpansProcessorBuilder builder() { - return new InferredSpansProcessorBuilder(); - } - - /** - * @param provider the provider to use. Null means that {@link GlobalOpenTelemetry} will be used - * lazily. 
- */ - public synchronized void setTracerProvider(TracerProvider provider) { - tracer = provider.get(TRACER_NAME, InferredSpansVersion.VERSION); - } - - @Override - public void onStart(Context parentContext, ReadWriteSpan span) { - profiler.getClock().onSpanStart(span, parentContext); - } - - @Override - public boolean isStartRequired() { - return true; - } - - @Override - public void onEnd(ReadableSpan span) {} - - @Override - public boolean isEndRequired() { - return false; - } - - @Override - public CompletableResultCode shutdown() { - CompletableResultCode result = new CompletableResultCode(); - logger.fine("Stopping Inferred Spans Processor"); - ThreadFactory threadFactory = ExecutorUtils.threadFactory("inferred-spans-shtudown", false); - Executors.newSingleThreadExecutor(threadFactory) - .submit( - () -> { - try { - profiler.stop(); - result.succeed(); - } catch (Exception e) { - logger.log(Level.SEVERE, "Failed to stop Inferred Spans Processor", e); - result.fail(); - } - }); - return result; - } - - private Tracer getTracer() { - if (tracer == null) { - synchronized (this) { - if (tracer == null) { - setTracerProvider(GlobalOpenTelemetry.get().getTracerProvider()); - } - } - } - return tracer; - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansProcessorBuilder.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansProcessorBuilder.java deleted file mode 100644 index 528eccef..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/InferredSpansProcessorBuilder.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package co.elastic.otel.profiler; - -import co.elastic.otel.common.config.WildcardMatcher; -import java.io.File; -import java.time.Duration; -import java.util.Arrays; -import java.util.List; -import javax.annotation.Nullable; - -public class InferredSpansProcessorBuilder { - private boolean profilerLoggingEnabled = true; - private boolean backupDiagnosticFiles = false; - private int asyncProfilerSafeMode = 0; - private boolean postProcessingEnabled = true; - private Duration samplingInterval = Duration.ofMillis(50); - private Duration inferredSpansMinDuration = Duration.ZERO; - private List includedClasses = WildcardMatcher.matchAllList(); - private List excludedClasses = - Arrays.asList( - WildcardMatcher.caseSensitiveMatcher("java.*"), - WildcardMatcher.caseSensitiveMatcher("javax.*"), - WildcardMatcher.caseSensitiveMatcher("sun.*"), - WildcardMatcher.caseSensitiveMatcher("com.sun.*"), - WildcardMatcher.caseSensitiveMatcher("jdk.*"), - WildcardMatcher.caseSensitiveMatcher("org.apache.tomcat.*"), - WildcardMatcher.caseSensitiveMatcher("org.apache.catalina.*"), - WildcardMatcher.caseSensitiveMatcher("org.apache.coyote.*"), - WildcardMatcher.caseSensitiveMatcher("org.jboss.as.*"), - WildcardMatcher.caseSensitiveMatcher("org.glassfish.*"), - WildcardMatcher.caseSensitiveMatcher("org.eclipse.jetty.*"), - WildcardMatcher.caseSensitiveMatcher("com.ibm.websphere.*"), - WildcardMatcher.caseSensitiveMatcher("io.undertow.*")); - private Duration profilerInterval = Duration.ofSeconds(5); - private Duration profilingDuration = Duration.ofSeconds(5); - private String profilerLibDirectory = null; - - // The following options are only intended to be modified in tests - private SpanAnchoredClock clock = new SpanAnchoredClock(); - private boolean startScheduledProfiling = true; - private @Nullable File activationEventsFile = null; - private @Nullable File jfrFile = null; - - InferredSpansProcessorBuilder() {} - - public InferredSpansProcessor build() { - InferredSpansConfiguration config = - new InferredSpansConfiguration( - profilerLoggingEnabled, - backupDiagnosticFiles, - asyncProfilerSafeMode, - postProcessingEnabled, - samplingInterval, - inferredSpansMinDuration, - includedClasses, - excludedClasses, - profilerInterval, - profilingDuration, - profilerLibDirectory); - return new InferredSpansProcessor( - config, clock, startScheduledProfiling, activationEventsFile, jfrFile); - } - - /** - * By default, async profiler prints warning messages about missing JVM symbols to standard - * output. Set this option to {@code true} to suppress such messages - */ - public InferredSpansProcessorBuilder profilerLoggingEnabled(boolean profilerLoggingEnabled) { - this.profilerLoggingEnabled = profilerLoggingEnabled; - return this; - } - - public InferredSpansProcessorBuilder backupDiagnosticFiles(boolean backupDiagnosticFiles) { - this.backupDiagnosticFiles = backupDiagnosticFiles; - return this; - } - - /** - * Can be used for analysis: the Async Profiler's area that deals with recovering stack trace - * frames is known to be sensitive in some systems. It is used as a bit mask using values are - * between 0 and 31, where 0 enables all recovery attempts and 31 disables all five (corresponding - * 1, 2, 4, 8 and 16). - */ - public InferredSpansProcessorBuilder asyncProfilerSafeMode(int asyncProfilerSafeMode) { - this.asyncProfilerSafeMode = asyncProfilerSafeMode; - return this; - } - - /** - * Can be used to test the effect of the async-profiler in isolation from the agent's - * post-processing. 
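A hedged sketch of wiring the processor manually through this builder (its options are documented above and below); the durations and the class matcher are illustrative choices, not recommendations:

    import co.elastic.otel.common.config.WildcardMatcher;
    import co.elastic.otel.profiler.InferredSpansProcessor;
    import io.opentelemetry.sdk.trace.SdkTracerProvider;
    import java.time.Duration;
    import java.util.Collections;

    public class ManualWiringExample {
      public static SdkTracerProvider tracerProvider() {
        InferredSpansProcessor processor =
            InferredSpansProcessor.builder()
                .samplingInterval(Duration.ofMillis(20))
                .inferredSpansMinDuration(Duration.ofMillis(1))
                .includedClasses(
                    Collections.singletonList(
                        WildcardMatcher.caseSensitiveMatcher("org.example.myapp.*")))
                .build();
        SdkTracerProvider provider =
            SdkTracerProvider.builder().addSpanProcessor(processor).build();
        // Without this call the processor lazily falls back to GlobalOpenTelemetry (see above).
        processor.setTracerProvider(provider);
        return provider;
      }
    }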
- */ - public InferredSpansProcessorBuilder postProcessingEnabled(boolean postProcessingEnabled) { - this.postProcessingEnabled = postProcessingEnabled; - return this; - } - - /** - * The frequency at which stack traces are gathered within a profiling session. The lower you set - * it, the more accurate the durations will be. This comes at the expense of higher overhead and - * more spans for potentially irrelevant operations. The minimal duration of a profiling-inferred - * span is the same as the value of this setting. - */ - public InferredSpansProcessorBuilder samplingInterval(Duration samplingInterval) { - this.samplingInterval = samplingInterval; - return this; - } - - /** - * The minimum duration of an inferred span. Note that the min duration is also implicitly set by - * the sampling interval. However, increasing the sampling interval also decreases the accuracy of - * the duration of inferred spans. - */ - public InferredSpansProcessorBuilder inferredSpansMinDuration(Duration inferredSpansMinDuration) { - this.inferredSpansMinDuration = inferredSpansMinDuration; - return this; - } - - /** - * If set, the agent will only create inferred spans for methods which match this list. Setting a - * value may slightly reduce overhead and can reduce clutter by only creating spans for the - * classes you are interested in. Example: org.example.myapp.* - */ - public InferredSpansProcessorBuilder includedClasses(List includedClasses) { - this.includedClasses = includedClasses; - return this; - } - - /** Excludes classes for which no profiler-inferred spans should be created. */ - public InferredSpansProcessorBuilder excludedClasses(List excludedClasses) { - this.excludedClasses = excludedClasses; - return this; - } - - /** The interval at which profiling sessions should be started. */ - public InferredSpansProcessorBuilder profilerInterval(Duration profilerInterval) { - this.profilerInterval = profilerInterval; - return this; - } - - /** - * The duration of a profiling session. For sampled transactions which fall within a profiling - * session (they start after and end before the session), so-called inferred spans will be - * created. They appear in the trace waterfall view like regular spans. NOTE: It is not - * recommended to set much higher durations as it may fill the activation events file and - * async-profiler's frame buffer. Warnings will be logged if the activation events file is full. - * If you want to have more profiling coverage, try decreasing {@link - * #profilerInterval(Duration)}. - */ - public InferredSpansProcessorBuilder profilingDuration(Duration profilingDuration) { - this.profilingDuration = profilingDuration; - return this; - } - - public InferredSpansProcessorBuilder profilerLibDirectory(String profilerLibDirectory) { - this.profilerLibDirectory = profilerLibDirectory; - return this; - } - - /** For testing only. */ - InferredSpansProcessorBuilder clock(SpanAnchoredClock clock) { - this.clock = clock; - return this; - } - - /** For testing only. */ - InferredSpansProcessorBuilder startScheduledProfiling(boolean startScheduledProfiling) { - this.startScheduledProfiling = startScheduledProfiling; - return this; - } - - /** For testing only. */ - InferredSpansProcessorBuilder activationEventsFile(@Nullable File activationEventsFile) { - this.activationEventsFile = activationEventsFile; - return this; - } - - /** For testing only. 
*/ - InferredSpansProcessorBuilder jfrFile(@Nullable File jfrFile) { - this.jfrFile = jfrFile; - return this; - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/ProfilingActivationListener.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/ProfilingActivationListener.java deleted file mode 100644 index d1cad13e..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/ProfilingActivationListener.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import co.elastic.otel.profiler.util.ThreadUtils; -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.api.trace.SpanContext; -import io.opentelemetry.context.Context; -import io.opentelemetry.context.ContextStorage; -import io.opentelemetry.context.Scope; -import java.io.Closeable; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import javax.annotation.Nullable; - -public class ProfilingActivationListener implements Closeable { - - static { - // ContextStorage.addWrapper must - // * happen before anyone accesses any Context - // * happen exactly once - // The "exactly" once part is why we use a static initializer: - // If an Otel-SDK is created and immediately shutdown again and if we create another SDK - // afterwards, we might accidentally register the wrapper twice - ContextStorage.addWrapper(ContextStorageWrapper::new); - } - - // For testing only - static void ensureInitialized() { - // does nothing but ensures that the static initializer ran - } - - // In normal use-cases there is only one ProfilingActivationListener active or zero - // (e.g. after SDK shutdown). However, in theory nothing prevents users from starting - // two SDKs at the same time, so it is safest to use a List here. 
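The static-initializer comment above is the crux: ContextStorage.addWrapper must run exactly once and before any Context is accessed. A minimal, hypothetical wrapper using the same OpenTelemetry context API, which only logs activations and is not the removed listener, looks roughly like this:

    import io.opentelemetry.context.Context;
    import io.opentelemetry.context.ContextStorage;
    import io.opentelemetry.context.Scope;
    import javax.annotation.Nullable;

    public class LoggingContextStorageExample {
      static {
        // Must happen once, before any Context is touched, hence the static initializer.
        ContextStorage.addWrapper(LoggingStorage::new);
      }

      static final class LoggingStorage implements ContextStorage {
        private final ContextStorage delegate;

        LoggingStorage(ContextStorage delegate) {
          this.delegate = delegate;
        }

        @Override
        public Scope attach(Context toAttach) {
          System.out.println("context activated");
          Scope scope = delegate.attach(toAttach);
          return () -> {
            scope.close();
            System.out.println("context deactivated");
          };
        }

        @Nullable
        @Override
        public Context current() {
          return delegate.current();
        }

        @Override
        public Context root() {
          return delegate.root();
        }
      }
    }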
- private static volatile List activeListeners = - Collections.emptyList(); - - private static class ContextStorageWrapper implements ContextStorage { - - private final ContextStorage delegate; - - private ContextStorageWrapper(ContextStorage delegate) { - this.delegate = delegate; - } - - @Override - public Scope attach(Context toAttach) { - List listeners = activeListeners; - if (listeners.isEmpty()) { - // no unnecessary allocations when no listener is active - return delegate.attach(toAttach); - } - Span attached = spanFromContextNullSafe(toAttach); - Span oldCtx = spanFromContextNullSafe(delegate.current()); - for (ProfilingActivationListener listener : listeners) { - listener.beforeActivate(oldCtx, attached); - } - Scope delegateScope = delegate.attach(toAttach); - return () -> { - delegateScope.close(); - Span newCtx = spanFromContextNullSafe(delegate.current()); - for (ProfilingActivationListener listener : listeners) { - listener.afterDeactivate(attached, newCtx); - } - }; - } - - Span spanFromContextNullSafe(@Nullable Context context) { - if (context == null) { - return Span.getInvalid(); - } - return Span.fromContext(context); - } - - @Nullable - @Override - public Context current() { - return delegate.current(); - } - - @Override - public Context root() { - return delegate.root(); - } - } - - private final SamplingProfiler profiler; - - private ProfilingActivationListener(SamplingProfiler profiler) { - this.profiler = profiler; - } - - public static ProfilingActivationListener register(SamplingProfiler profiler) { - ProfilingActivationListener result = new ProfilingActivationListener(profiler); - synchronized (ProfilingActivationListener.class) { - List listenersList = new ArrayList<>(activeListeners); - listenersList.add(result); - activeListeners = Collections.unmodifiableList(listenersList); - } - return result; - } - - @Override - public void close() { - synchronized (ProfilingActivationListener.class) { - List listenersList = new ArrayList<>(activeListeners); - listenersList.remove(this); - activeListeners = Collections.unmodifiableList(listenersList); - } - } - - public void beforeActivate(Span oldContext, Span newContext) { - if (newContext.getSpanContext().isValid() - && newContext.getSpanContext().isSampled() - && !newContext.getSpanContext().isRemote() - && !ThreadUtils.isVirtual(Thread.currentThread())) { - - SpanContext oldSpanContext = oldContext.getSpanContext(); - boolean isOldContextLocalSpan = oldSpanContext.isValid() && !oldSpanContext.isRemote(); - profiler.onActivation(newContext, isOldContextLocalSpan ? oldContext : null); - } - } - - public void afterDeactivate(Span deactivatedContext, Span newContext) { - if (deactivatedContext.getSpanContext().isValid() - && deactivatedContext.getSpanContext().isSampled() - && !deactivatedContext.getSpanContext().isRemote() - && !ThreadUtils.isVirtual(Thread.currentThread())) { - - SpanContext newSpanContext = newContext.getSpanContext(); - boolean isNewContextLocalSpan = newSpanContext.isValid() && !newSpanContext.isRemote(); - profiler.onDeactivation(deactivatedContext, isNewContextLocalSpan ? newContext : null); - } - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/SamplingProfiler.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/SamplingProfiler.java deleted file mode 100644 index 130e1c1b..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/SamplingProfiler.java +++ /dev/null @@ -1,1054 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. 
under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import static java.nio.file.StandardOpenOption.READ; -import static java.nio.file.StandardOpenOption.WRITE; - -import co.elastic.otel.common.config.WildcardMatcher; -import co.elastic.otel.common.util.ExecutorUtils; -import co.elastic.otel.profiler.asyncprofiler.JfrParser; -import co.elastic.otel.profiler.collections.Long2ObjectHashMap; -import co.elastic.otel.profiler.pooling.Allocator; -import co.elastic.otel.profiler.pooling.ObjectPool; -import com.lmax.disruptor.EventFactory; -import com.lmax.disruptor.EventPoller; -import com.lmax.disruptor.EventTranslatorTwoArg; -import com.lmax.disruptor.RingBuffer; -import com.lmax.disruptor.Sequence; -import com.lmax.disruptor.SequenceBarrier; -import com.lmax.disruptor.WaitStrategy; -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.api.trace.Tracer; -import java.io.File; -import java.io.IOException; -import java.nio.Buffer; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedByInterruptException; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardOpenOption; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.LockSupport; -import java.util.function.Supplier; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.annotation.Nullable; -import one.profiler.AsyncProfiler; - -/** - * Correlates {@link ActivationEvent}s with {@link StackFrame}s which are recorded by {@link - * AsyncProfiler}, a native {@code - * AsyncGetCallTree}-based (and therefore non - * safepoint-biased) JVMTI agent. - * - *

<p>Recording of {@link ActivationEvent}s: - * - *

<p>The {@link #onActivation} and {@link #onDeactivation} methods are called by {@link - * ProfilingActivationListener} which register an {@link ActivationEvent} to a {@linkplain - * #eventBuffer ring buffer} whenever a {@link Span} gets {@link Span#activate()}d or {@link - * Span#deactivate()}d while a {@linkplain #profilingSessionOngoing profiling session is ongoing}. A - * background thread consumes the {@link ActivationEvent}s and writes them to a {@linkplain - * #activationEventsBuffer direct buffer} which is flushed to a {@linkplain - * #activationEventsFileChannel file}. That is necessary because within a profiling session (which - * lasts 10s by default) there may be many more {@link ActivationEvent}s than the ring buffer {@link - * #RING_BUFFER_SIZE can hold}. The file can hold {@link #ACTIVATION_EVENTS_IN_FILE} events and each - * is {@link ActivationEvent#SERIALIZED_SIZE} in size. This process is completely garbage free - * thanks to the {@link RingBuffer} acting as an object pool for {@link ActivationEvent}s. - * - *
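To make the garbage-free hand-off above concrete, here is a hedged, self-contained sketch built on the same com.lmax.disruptor primitives: pre-allocated events are filled in place by a translator, a gating Sequence keeps producers from overwriting unread slots, and an EventPoller drains them without blocking. The event fields and names are illustrative, not the removed ActivationEvent.

    import com.lmax.disruptor.EventPoller;
    import com.lmax.disruptor.EventTranslatorOneArg;
    import com.lmax.disruptor.RingBuffer;
    import com.lmax.disruptor.Sequence;
    import com.lmax.disruptor.YieldingWaitStrategy;

    public class PooledEventBufferExample {
      static final class Event {
        long timestampNanos;
        long threadId;
      }

      private static final EventTranslatorOneArg<Event, Long> TRANSLATOR =
          (event, sequence, timestamp) -> {
            // The pre-allocated slot is filled in place; no new event objects are created per publish.
            event.timestampNanos = timestamp;
            event.threadId = Thread.currentThread().getId();
          };

      private final RingBuffer<Event> buffer =
          RingBuffer.createMultiProducer(Event::new, 4 * 1024, new YieldingWaitStrategy());
      private final EventPoller<Event> poller = buffer.newPoller();
      private final Sequence consumed = new Sequence();

      public PooledEventBufferExample() {
        // Prevents producers from overwriting events that have not been polled yet.
        buffer.addGatingSequences(consumed);
      }

      /** Returns false instead of blocking when the buffer is full, like tryPublishEvent above. */
      public boolean publish(long timestampNanos) {
        return buffer.tryPublishEvent(TRANSLATOR, timestampNanos);
      }

      public void drain() throws Exception {
        poller.poll(
            (event, sequence, endOfBatch) -> {
              if (endOfBatch) {
                consumed.set(sequence);
              }
              // A real consumer would serialize the event to the activation-events file here.
              return true;
            });
      }
    }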

<p>Recording stack traces: - * - *

<p>The same background thread that processes the {@link ActivationEvent}s starts the wall clock - * profiler of async-profiler via {@link AsyncProfiler#execute(String)}. After the {@link - * InferredSpansConfiguration#getProfilingDuration()} is over it stops the profiling and starts - * processing the JFR file created by async-profiler with {@link JfrParser}. - * - *
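For orientation, a hedged sketch of that start/stop cycle against the one.profiler.AsyncProfiler command-string API; the command mirrors the removed createStartCommand() further below, except that the thread filter, safemode and activation-event handling are left out, and the interval and paths are examples only:

    import java.io.File;
    import java.io.IOException;
    import one.profiler.AsyncProfiler;

    public class WallClockSessionExample {
      public static File recordOnce(long samplingIntervalMillis, long profilingDurationMillis)
          throws IOException, InterruptedException {
        File jfrFile = File.createTempFile("apm-traces-", ".jfr");
        jfrFile.deleteOnExit();
        AsyncProfiler profiler = AsyncProfiler.getInstance();
        profiler.execute(
            "start,jfr,clock=m,event=wall,cstack=n,interval="
                + samplingIntervalMillis
                + "ms,file="
                + jfrFile);
        // The removed SamplingProfiler consumes activation events while the session is running;
        // this sketch simply waits for the configured duration.
        Thread.sleep(profilingDurationMillis);
        profiler.execute("stop");
        return jfrFile; // ready to be parsed, e.g. by the JfrParser removed in this patch
      }
    }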

<p>Correlating {@link ActivationEvent}s with the traces recorded by {@link AsyncProfiler}: - * - *

<p>After both the JFR file and the file containing the {@link ActivationEvent}s have been - * written, it's now time to process them in tandem by correlating based on thread ids and - * timestamps. The result of this correlation, performed by {@link #processTraces}, are {@link - * CallTree}s which are created for each thread which has seen an {@linkplain Span#activate() - * activation} and at least one stack trace. Once {@linkplain - * ActivationEvent#handleDeactivationEvent(SamplingProfiler) handling the deactivation event} of the - * root span in a thread (after which {@link ElasticApmTracer#getActive()} would return {@code - * null}), the {@link CallTree} is {@linkplain CallTree#spanify(CallTree.Root, TraceContext) - * converted into regular spans}. - * - *
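The tandem processing described above is essentially a merge of two timestamp-ordered streams: before a stack-trace sample taken at time t is added to a thread's call tree, every activation event with a timestamp up to t is replayed first. A hedged, type-simplified illustration with stand-in classes (not the removed ones):

    import java.util.Iterator;
    import java.util.List;

    public class CorrelationExample {
      static final class ActivationEvent {
        final long nanoTime;
        final long threadId;
        final boolean activation;

        ActivationEvent(long nanoTime, long threadId, boolean activation) {
          this.nanoTime = nanoTime;
          this.threadId = threadId;
          this.activation = activation;
        }
      }

      static final class StackTraceSample {
        final long nanoTime;
        final long threadId;

        StackTraceSample(long nanoTime, long threadId) {
          this.nanoTime = nanoTime;
          this.threadId = threadId;
        }
      }

      /** Both lists must already be sorted by nanoTime, as the surrounding javadoc requires. */
      static void correlate(List<ActivationEvent> activations, List<StackTraceSample> samples) {
        Iterator<ActivationEvent> events = activations.iterator();
        ActivationEvent pending = events.hasNext() ? events.next() : null;
        for (StackTraceSample sample : samples) {
          while (pending != null && pending.nanoTime <= sample.nanoTime) {
            // replay activation/deactivation into the call tree of pending.threadId
            pending = events.hasNext() ? events.next() : null;
          }
          // add the sample to the call tree of sample.threadId
        }
        // trailing activation events (typically deactivations) are replayed after the last sample
      }
    }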

Overall, the allocation rate does not depend on the number of {@link ActivationEvent}s but - * only on {@link InferredSpansConfiguration#getProfilingInterval()} and {@link - * InferredSpansConfiguration#getSamplingInterval()}. Having said that, there are some optimizations - * so that the JFR file is not processed at all if there have not been any {@link ActivationEvent} - * in a given profiling session. Also, only if there's a {@link CallTree.Root} for a {@link - * StackTraceEvent}, we will {@link JfrParser#resolveStackTrace(long, boolean, List, int) resolve - * the full stack trace}. - */ -class SamplingProfiler implements Runnable { - - private static final String LIB_DIR_PROPERTY_NAME = "one.profiler.extractPath"; - - private static final Logger logger = Logger.getLogger(SamplingProfiler.class.getName()); - private static final int ACTIVATION_EVENTS_IN_FILE = 1_000_000; - private static final int MAX_STACK_DEPTH = 256; - private static final int PRE_ALLOCATE_ACTIVATION_EVENTS_FILE_MB = 10; - private static final int MAX_ACTIVATION_EVENTS_FILE_SIZE = - ACTIVATION_EVENTS_IN_FILE * ActivationEvent.SERIALIZED_SIZE; - private static final int ACTIVATION_EVENTS_BUFFER_SIZE = - ActivationEvent.SERIALIZED_SIZE * 4 * 1024; - private final EventTranslatorTwoArg ACTIVATION_EVENT_TRANSLATOR = - new EventTranslatorTwoArg() { - @Override - public void translateTo( - ActivationEvent event, long sequence, Span active, Span previouslyActive) { - event.activation( - active, Thread.currentThread().getId(), previouslyActive, clock.nanoTime(), clock); - } - }; - private final EventTranslatorTwoArg DEACTIVATION_EVENT_TRANSLATOR = - new EventTranslatorTwoArg() { - @Override - public void translateTo( - ActivationEvent event, long sequence, Span active, Span previouslyActive) { - event.deactivation( - active, Thread.currentThread().getId(), previouslyActive, clock.nanoTime(), clock); - } - }; - // sizeof(ActivationEvent) is 176B so the ring buffer should be around 880KiB - static final int RING_BUFFER_SIZE = 4 * 1024; - - // Visible for testing - final InferredSpansConfiguration config; - private final ScheduledExecutorService scheduler; - private final Long2ObjectHashMap profiledThreads = new Long2ObjectHashMap<>(); - private final RingBuffer eventBuffer; - private volatile boolean profilingSessionOngoing = false; - private final Sequence sequence; - private final SpanAnchoredClock clock; - private final ObjectPool rootPool; - private final ThreadMatcher threadMatcher = new ThreadMatcher(); - private final EventPoller poller; - @Nullable private File jfrFile; - private boolean canDeleteJfrFile; - private final WriteActivationEventToFileHandler writeActivationEventToFileHandler = - new WriteActivationEventToFileHandler(); - @Nullable private JfrParser jfrParser; - private volatile int profilingSessions; - - private final ByteBuffer activationEventsBuffer; - - /** - * Used to efficiently write {@link #activationEventsBuffer} via {@link - * FileChannel#write(ByteBuffer)} - */ - @Nullable private File activationEventsFile; - - private boolean canDeleteActivationEventsFile; - - @Nullable private FileChannel activationEventsFileChannel; - private final ObjectPool callTreePool; - private final TraceContext contextForLogging; - - private final ProfilingActivationListener activationListener; - - private boolean previouslyEnabled = false; - - private final Supplier tracerProvider; - - private final AsyncProfiler profiler; - - /** - * Creates a sampling profiler, optionally relying on existing files. - * - *

This constructor is most likely used for tests that rely on a known set of files - * - * @param tracer tracer - * @param nanoClock clock - * @param activationEventsFile activation events file, if {@literal null} a temp file will be used - * @param jfrFile java flight recorder file, if {@literal null} a temp file will be used instead - */ - SamplingProfiler( - InferredSpansConfiguration config, - SpanAnchoredClock nanoClock, - Supplier tracerProvider, - @Nullable File activationEventsFile, - @Nullable File jfrFile) { - this.config = config; - this.tracerProvider = tracerProvider; - this.scheduler = - Executors.newSingleThreadScheduledExecutor( - ExecutorUtils.threadFactory("inferred-spans", true)); - this.clock = nanoClock; - this.eventBuffer = createRingBuffer(); - this.sequence = new Sequence(); - // tells the ring buffer to not override slots which have not been read yet - this.eventBuffer.addGatingSequences(sequence); - this.poller = eventBuffer.newPoller(); - contextForLogging = new TraceContext(); - this.callTreePool = - ObjectPool.createRecyclable( - 2 * 1024, - new Allocator() { - @Override - public CallTree createInstance() { - return new CallTree(); - } - }); - // call tree roots are pooled so that fast activations/deactivations with no associated stack - // traces don't cause allocations - this.rootPool = - ObjectPool.createRecyclable( - 512, - new Allocator() { - @Override - public CallTree.Root createInstance() { - return new CallTree.Root(); - } - }); - this.jfrFile = jfrFile; - activationEventsBuffer = ByteBuffer.allocateDirect(ACTIVATION_EVENTS_BUFFER_SIZE); - this.activationEventsFile = activationEventsFile; - profiler = loadProfiler(); - activationListener = ProfilingActivationListener.register(this); - } - - private AsyncProfiler loadProfiler() { - String libDir = config.getProfilerLibDirectory(); - try { - Files.createDirectories(Paths.get(libDir)); - } catch (IOException e) { - throw new RuntimeException("Failed to create directory to extract lib to", e); - } - System.setProperty(LIB_DIR_PROPERTY_NAME, libDir); - return AsyncProfiler.getInstance(); - } - - /** - * For testing only! This method must only be called in tests and some period after activation / - * deactivation events, as otherwise it is racy. - * - * @param thread the Thread to check. - * @return true, if profiling is active for the given thread. 
- */ - boolean isProfilingActiveOnThread(Thread thread) { - return profiledThreads.containsKey(thread.getId()); - } - - private synchronized void createFilesIfRequired() throws IOException { - if (jfrFile == null || !jfrFile.exists()) { - jfrFile = File.createTempFile("apm-traces-", ".jfr"); - jfrFile.deleteOnExit(); - canDeleteJfrFile = true; - } - if (activationEventsFile == null || !activationEventsFile.exists()) { - activationEventsFile = File.createTempFile("apm-activation-events-", ".bin"); - activationEventsFile.deleteOnExit(); - canDeleteActivationEventsFile = true; - } - if (activationEventsFileChannel == null || !activationEventsFileChannel.isOpen()) { - activationEventsFileChannel = - FileChannel.open( - activationEventsFile.toPath(), StandardOpenOption.READ, StandardOpenOption.WRITE); - } - if (activationEventsFileChannel.size() == 0) { - preAllocate(activationEventsFileChannel, PRE_ALLOCATE_ACTIVATION_EVENTS_FILE_MB); - } - } - - // visible for benchmarks - public void skipToEndOfActivationEventsFile() throws IOException { - activationEventsFileChannel.position(activationEventsFileChannel.size()); - } - - /** - * Makes sure that the first blocks of the file are contiguous to provide fast sequential access - */ - private static void preAllocate(FileChannel channel, int mb) throws IOException { - long initialPos = channel.position(); - ByteBuffer oneKb = ByteBuffer.allocate(1024); - for (int i = 0; i < mb * 1024; i++) { - channel.write(oneKb); - ((Buffer) oneKb).clear(); - } - channel.position(initialPos); - } - - private RingBuffer createRingBuffer() { - return RingBuffer.createMultiProducer( - new EventFactory() { - @Override - public ActivationEvent newInstance() { - return new ActivationEvent(); - } - }, - RING_BUFFER_SIZE, - new NoWaitStrategy()); - } - - /** - * Called whenever a span is activated. - * - *

This and {@link #onDeactivation} are the only methods which are executed in a multi-threaded - * context. - * - * @param activeSpan the span which is about to be activated - * @param previouslyActive the span which has previously been activated - * @return {@code true}, if the event could be processed, {@code false} if the internal event - * queue is full which means the event has been discarded - */ - public boolean onActivation(Span activeSpan, @Nullable Span previouslyActive) { - if (profilingSessionOngoing) { - if (previouslyActive == null) { - profiler.addThread(Thread.currentThread()); - } - boolean success = - eventBuffer.tryPublishEvent(ACTIVATION_EVENT_TRANSLATOR, activeSpan, previouslyActive); - if (!success) { - logger.fine("Could not add activation event to ring buffer as no slots are available"); - } - return success; - } - return false; - } - - /** - * Called whenever a span is deactivated. - * - *

This and {@link #onActivation} are the only methods which are executed in a multi-threaded - * context. - * - * @param activeSpan the span which is about to be activated - * @param previouslyActive the span which has previously been activated - * @return {@code true}, if the event could be processed, {@code false} if the internal event - * queue is full which means the event has been discarded - */ - public boolean onDeactivation(Span activeSpan, @Nullable Span previouslyActive) { - if (profilingSessionOngoing) { - if (previouslyActive == null) { - profiler.removeThread(Thread.currentThread()); - } - boolean success = - eventBuffer.tryPublishEvent(DEACTIVATION_EVENT_TRANSLATOR, activeSpan, previouslyActive); - if (!success) { - logger.fine("Could not add deactivation event to ring buffer as no slots are available"); - } - return success; - } - return false; - } - - @Override - public void run() { - - // lazily create temporary files - try { - createFilesIfRequired(); - } catch (IOException e) { - logger.log(Level.SEVERE, "unable to initialize profiling files", e); - return; - } - - Duration profilingDuration = config.getProfilingDuration(); - boolean postProcessingEnabled = config.isPostProcessingEnabled(); - - setProfilingSessionOngoing(postProcessingEnabled); - - if (postProcessingEnabled) { - logger.fine("Start full profiling session (async-profiler and agent processing)"); - } else { - logger.fine("Start async-profiler profiling session"); - } - try { - profile(profilingDuration); - } catch (Throwable t) { - setProfilingSessionOngoing(false); - logger.log(Level.SEVERE, "Stopping profiler", t); - return; - } - logger.fine("End profiling session"); - - boolean interrupted = Thread.currentThread().isInterrupted(); - boolean continueProfilingSession = - config.isNonStopProfiling() && !interrupted && postProcessingEnabled; - setProfilingSessionOngoing(continueProfilingSession); - - if (!interrupted && !scheduler.isShutdown()) { - long delay = config.getProfilingInterval().toMillis() - profilingDuration.toMillis(); - scheduler.schedule(this, delay, TimeUnit.MILLISECONDS); - } - } - - private void profile(Duration profilingDuration) throws Exception { - try { - String startCommand = createStartCommand(); - String startMessage = profiler.execute(startCommand); - logger.fine(startMessage); - if (!profiledThreads.isEmpty()) { - restoreFilterState(profiler); - } - // Doesn't need to be atomic as this field is being updated only by a single thread - //noinspection NonAtomicOperationOnVolatileField - profilingSessions++; - - // When post-processing is disabled activation events are ignored, but we still need to invoke - // this method - // as it is the one enforcing the sampling session duration. 
As a side effect it will also - // consume - // residual activation events if post-processing is disabled dynamically - consumeActivationEventsFromRingBufferAndWriteToFile(profilingDuration); - - String stopMessage = profiler.execute("stop"); - logger.fine(stopMessage); - - // When post-processing is disabled, jfr file will not be parsed and the heavy processing will - // not occur - // as this method aborts when no activation events are buffered - processTraces(); - } catch (InterruptedException | ClosedByInterruptException e) { - try { - profiler.stop(); - } catch (IllegalStateException ignore) { - } - Thread.currentThread().interrupt(); - } - } - - String createStartCommand() { - StringBuilder startCommand = - new StringBuilder("start,jfr,clock=m,event=wall,cstack=n,interval=") - .append(config.getSamplingInterval().toMillis()) - .append("ms,filter,file=") - .append(jfrFile) - .append(",safemode=") - .append(config.getAsyncProfilerSafeMode()); - if (!config.isProfilingLoggingEnabled()) { - startCommand.append(",loglevel=none"); - } - return startCommand.toString(); - } - - /** - * When doing continuous profiling (interval=duration), we have to tell async-profiler which - * threads it should profile after re-starting it. - */ - private void restoreFilterState(AsyncProfiler asyncProfiler) { - threadMatcher.forEachThread( - new ThreadMatcher.NonCapturingPredicate.KeySet>() { - @Override - public boolean test(Thread thread, Long2ObjectHashMap.KeySet profiledThreads) { - return profiledThreads.contains(thread.getId()); - } - }, - profiledThreads.keySet(), - new ThreadMatcher.NonCapturingConsumer() { - @Override - public void accept(Thread thread, AsyncProfiler asyncProfiler) { - asyncProfiler.addThread(thread); - } - }, - asyncProfiler); - } - - private void consumeActivationEventsFromRingBufferAndWriteToFile(Duration profilingDuration) - throws Exception { - resetActivationEventBuffer(); - long threshold = System.currentTimeMillis() + profilingDuration.toMillis(); - long initialSleep = 100_000; - long maxSleep = 10_000_000; - long sleep = initialSleep; - while (System.currentTimeMillis() < threshold && !Thread.currentThread().isInterrupted()) { - if (activationEventsFileChannel.position() < MAX_ACTIVATION_EVENTS_FILE_SIZE) { - EventPoller.PollState poll = consumeActivationEventsFromRingBufferAndWriteToFile(); - if (poll == EventPoller.PollState.PROCESSING) { - sleep = initialSleep; - // don't sleep, after consuming the events there might be new ones in the ring buffer - } else { - if (sleep < maxSleep) { - sleep *= 2; - } - LockSupport.parkNanos(sleep); - } - } else { - logger.warning("The activation events file is full. Try lowering the profiling_duration."); - // the file is full, sleep the rest of the profilingDuration - Thread.sleep(Math.max(0, threshold - System.currentTimeMillis())); - } - } - } - - EventPoller.PollState consumeActivationEventsFromRingBufferAndWriteToFile() throws Exception { - createFilesIfRequired(); - return poller.poll(writeActivationEventToFileHandler); - } - - public void processTraces() throws IOException { - if (jfrParser == null) { - jfrParser = new JfrParser(); - } - if (Thread.currentThread().isInterrupted()) { - return; - } - createFilesIfRequired(); - - long eof = startProcessingActivationEventsFile(); - if (eof == 0 && activationEventsBuffer.limit() == 0 && profiledThreads.isEmpty()) { - logger.fine("No activation events during this period. 
Skip processing stack traces."); - return; - } - long start = System.nanoTime(); - List excludedClasses = config.getExcludedClasses(); - List includedClasses = config.getIncludedClasses(); - if (config.isBackupDiagnosticFiles()) { - backupDiagnosticFiles(eof); - } - try { - jfrParser.parse(jfrFile, excludedClasses, includedClasses); - final List stackTraceEvents = getSortedStackTraceEvents(jfrParser); - if (logger.isLoggable(Level.FINE)) { - logger.log(Level.FINE, "Processing {0} stack traces", stackTraceEvents.size()); - } - List stackFrames = new ArrayList<>(); - ActivationEvent event = new ActivationEvent(); - long inferredSpansMinDuration = getInferredSpansMinDurationNs(); - for (StackTraceEvent stackTrace : stackTraceEvents) { - processActivationEventsUpTo(stackTrace.nanoTime, event, eof); - CallTree.Root root = profiledThreads.get(stackTrace.threadId); - if (root != null) { - jfrParser.resolveStackTrace(stackTrace.stackTraceId, stackFrames, MAX_STACK_DEPTH); - if (stackFrames.size() == MAX_STACK_DEPTH) { - logger.fine( - "Max stack depth reached. Set profiling_included_classes or profiling_excluded_classes."); - } - // stack frames may not contain any Java frames - // see - // https://github.com/jvm-profiling-tools/async-profiler/issues/271#issuecomment-582430233 - if (!stackFrames.isEmpty()) { - try { - root.addStackTrace( - stackFrames, stackTrace.nanoTime, callTreePool, inferredSpansMinDuration); - } catch (Exception e) { - logger.log( - Level.WARNING, - "Removing call tree for thread {0} because of exception while adding a stack trace: {1} {2}", - new Object[] {stackTrace.threadId, e.getClass(), e.getMessage()}); - logger.log(Level.FINE, e.getMessage(), e); - profiledThreads.remove(stackTrace.threadId); - } - } - } - stackFrames.clear(); - } - // process all activation events that happened after the last stack trace event - // otherwise we may miss root deactivations - processActivationEventsUpTo(System.nanoTime(), event, eof); - } finally { - if (logger.isLoggable(Level.FINE)) { - logger.log(Level.FINE, "Processing traces took {0}us", (System.nanoTime() - start) / 1000); - } - jfrParser.resetState(); - resetActivationEventBuffer(); - } - } - - private void backupDiagnosticFiles(long eof) throws IOException { - String now = String.format("%tFT% 0) { - activationEventsFileChannel.transferTo(0, eof, activationsFile); - } else { - int position = activationEventsBuffer.position(); - activationsFile.write(activationEventsBuffer); - activationEventsBuffer.position(position); - } - } - Files.copy(jfrFile.toPath(), profilerDir.resolve(now + "-traces.jfr")); - } - - private long getInferredSpansMinDurationNs() { - return config.getInferredSpansMinDuration().toNanos(); - } - - /** - * Returns stack trace events of relevant threads sorted by timestamp. The events in the JFR file - * are not in order. Even for the same thread, a more recent event might come before an older - * event. In order to be able to correlate stack trace events and activation events, both need to - * be in order. - * - *
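As a small hedged illustration of that ordering requirement, sorting by the sample timestamp is all that is needed once the events are materialized; the type below is a stand-in, not the removed StackTraceEvent:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class SortByTimestampExample {
      static final class Sample {
        final long nanoTime;
        final long stackTraceId;
        final long threadId;

        Sample(long nanoTime, long stackTraceId, long threadId) {
          this.nanoTime = nanoTime;
          this.stackTraceId = stackTraceId;
          this.threadId = threadId;
        }
      }

      static List<Sample> sortedByTime(List<Sample> unordered) {
        List<Sample> sorted = new ArrayList<>(unordered);
        sorted.sort(Comparator.comparingLong(sample -> sample.nanoTime));
        return sorted;
      }
    }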

Returns only events for threads where at least one activation happened (because only those - * are profiled by async-profiler) - */ - private List getSortedStackTraceEvents(JfrParser jfrParser) throws IOException { - final List stackTraceEvents = new ArrayList<>(); - jfrParser.consumeStackTraces( - new JfrParser.StackTraceConsumer() { - @Override - public void onCallTree(long threadId, long stackTraceId, long nanoTime) { - stackTraceEvents.add(new StackTraceEvent(nanoTime, stackTraceId, threadId)); - } - }); - Collections.sort(stackTraceEvents); - return stackTraceEvents; - } - - void processActivationEventsUpTo(long timestamp, long eof) throws IOException { - processActivationEventsUpTo(timestamp, new ActivationEvent(), eof); - } - - public void processActivationEventsUpTo(long timestamp, ActivationEvent event, long eof) - throws IOException { - FileChannel activationEventsFileChannel = this.activationEventsFileChannel; - ByteBuffer buf = activationEventsBuffer; - long previousTimestamp = 0; - while (buf.hasRemaining() || activationEventsFileChannel.position() < eof) { - if (!buf.hasRemaining()) { - readActivationEventsToBuffer(activationEventsFileChannel, eof, buf); - } - long eventTimestamp = peekLong(buf); - if (eventTimestamp < previousTimestamp && logger.isLoggable(Level.FINE)) { - logger.log( - Level.FINE, - "Timestamp of current activation event ({0}) is lower than the one from the previous event ({1})", - new Object[] {eventTimestamp, previousTimestamp}); - } - previousTimestamp = eventTimestamp; - if (eventTimestamp <= timestamp) { - event.deserialize(buf); - try { - event.handle(this); - } catch (Exception e) { - logger.log( - Level.WARNING, - "Removing call tree for thread {0} because of exception while handling activation event: {1} {2}", - new Object[] {event.threadId, e.getClass(), e.getMessage()}); - logger.log(Level.FINE, e.getMessage(), e); - profiledThreads.remove(event.threadId); - } - } else { - return; - } - } - } - - private void readActivationEventsToBuffer( - FileChannel activationEventsFileChannel, long eof, ByteBuffer byteBuffer) throws IOException { - Buffer buf = byteBuffer; - buf.clear(); - long remaining = eof - activationEventsFileChannel.position(); - activationEventsFileChannel.read(byteBuffer); - buf.flip(); - if (remaining < buf.capacity()) { - buf.limit((int) remaining); - } - } - - private static long peekLong(ByteBuffer buf) { - int pos = buf.position(); - try { - return buf.getLong(); - } finally { - ((Buffer) buf).position(pos); - } - } - - public void resetActivationEventBuffer() throws IOException { - ((Buffer) activationEventsBuffer).clear(); - if (activationEventsFileChannel != null && activationEventsFileChannel.isOpen()) { - activationEventsFileChannel.position(0L); - } - } - - private void flushActivationEvents() throws IOException { - if (activationEventsBuffer.position() > 0) { - ((Buffer) activationEventsBuffer).flip(); - activationEventsFileChannel.write(activationEventsBuffer); - ((Buffer) activationEventsBuffer).clear(); - } - } - - long startProcessingActivationEventsFile() throws IOException { - Buffer activationEventsBuffer = this.activationEventsBuffer; - if (activationEventsFileChannel.position() > 0) { - flushActivationEvents(); - activationEventsBuffer.limit(0); - } else { - activationEventsBuffer.flip(); - } - long eof = activationEventsFileChannel.position(); - activationEventsFileChannel.position(0); - return eof; - } - - void copyFromFiles(Path activationEvents, Path traces) throws IOException { - createFilesIfRequired(); - - 
FileChannel otherActivationsChannel = FileChannel.open(activationEvents, READ); - activationEventsFileChannel.transferFrom( - otherActivationsChannel, 0, otherActivationsChannel.size()); - activationEventsFileChannel.position(otherActivationsChannel.size()); - FileChannel otherTracesChannel = FileChannel.open(traces, READ); - FileChannel.open(jfrFile.toPath(), WRITE) - .transferFrom(otherTracesChannel, 0, otherTracesChannel.size()); - } - - public void start() { - scheduler.submit(this); - } - - public void stop() throws Exception { - // cancels/interrupts the profiling thread - // implicitly clears profiled threads - scheduler.shutdown(); - scheduler.awaitTermination(10, TimeUnit.SECONDS); - - activationListener.close(); - - if (activationEventsFileChannel != null) { - activationEventsFileChannel.close(); - } - - if (jfrFile != null && canDeleteJfrFile) { - jfrFile.delete(); - } - if (activationEventsFile != null && canDeleteActivationEventsFile) { - activationEventsFile.delete(); - } - } - - void setProfilingSessionOngoing(boolean profilingSessionOngoing) { - this.profilingSessionOngoing = profilingSessionOngoing; - if (!profilingSessionOngoing) { - clearProfiledThreads(); - } else if (!profiledThreads.isEmpty() && logger.isLoggable(Level.FINE)) { - logger.log(Level.FINE, "Retaining {0} call tree roots", profiledThreads.size()); - } - } - - public void clearProfiledThreads() { - for (CallTree.Root root : profiledThreads.values()) { - root.recycle(callTreePool, rootPool); - } - profiledThreads.clear(); - } - - // for testing - CallTree.Root getRoot() { - return profiledThreads.get(Thread.currentThread().getId()); - } - - void clear() throws IOException { - // consume all remaining events from the ring buffer - try { - poller.poll( - new EventPoller.Handler() { - @Override - public boolean onEvent(ActivationEvent event, long sequence, boolean endOfBatch) { - SamplingProfiler.this.sequence.set(sequence); - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - resetActivationEventBuffer(); - profiledThreads.clear(); - callTreePool.clear(); - rootPool.clear(); - } - - int getProfilingSessions() { - return profilingSessions; - } - - public SpanAnchoredClock getClock() { - return clock; - } - - public static class StackTraceEvent implements Comparable { - private final long nanoTime; - private final long stackTraceId; - private final long threadId; - - private StackTraceEvent(long nanoTime, long stackTraceId, long threadId) { - this.nanoTime = nanoTime; - this.stackTraceId = stackTraceId; - this.threadId = threadId; - } - - public long getThreadId() { - return threadId; - } - - public long getNanoTime() { - return nanoTime; - } - - public long getStackTraceId() { - return stackTraceId; - } - - @Override - public int compareTo(StackTraceEvent o) { - return Long.compare(nanoTime, o.nanoTime); - } - } - - private static class ActivationEvent { - public static final int SERIALIZED_SIZE = - Long.SIZE / Byte.SIZE - + // timestamp - TraceContext.SERIALIZED_LENGTH - + // traceContextBuffer - TraceContext.SERIALIZED_LENGTH - + // previousContextBuffer - 1 - + // rootContext - Long.SIZE / Byte.SIZE - + // threadId - 1; // activation - - private long timestamp; - private byte[] traceContextBuffer = new byte[TraceContext.SERIALIZED_LENGTH]; - private byte[] previousContextBuffer = new byte[TraceContext.SERIALIZED_LENGTH]; - private boolean rootContext; - private long threadId; - private boolean activation; - - public void activation( - Span context, - long threadId, - 
@Nullable Span previousContext, - long nanoTime, - SpanAnchoredClock clock) { - set(context, threadId, true, previousContext, nanoTime, clock); - } - - public void deactivation( - Span context, - long threadId, - @Nullable Span previousContext, - long nanoTime, - SpanAnchoredClock clock) { - set(context, threadId, false, previousContext, nanoTime, clock); - } - - private void set( - Span traceContext, - long threadId, - boolean activation, - @Nullable Span previousContext, - long nanoTime, - SpanAnchoredClock clock) { - TraceContext.serialize(traceContext, clock.getAnchor(traceContext), traceContextBuffer); - this.threadId = threadId; - this.activation = activation; - if (previousContext != null) { - TraceContext.serialize( - previousContext, clock.getAnchor(previousContext), previousContextBuffer); - rootContext = false; - } else { - rootContext = true; - } - this.timestamp = nanoTime; - } - - public void handle(SamplingProfiler samplingProfiler) { - if (logger.isLoggable(Level.FINE)) { - logger.log( - Level.FINE, - "Handling event timestamp={0} root={1} threadId={2} activation={3}", - new Object[] {timestamp, rootContext, threadId, activation}); - } - if (activation) { - handleActivationEvent(samplingProfiler); - } else { - handleDeactivationEvent(samplingProfiler); - } - } - - private void handleActivationEvent(SamplingProfiler samplingProfiler) { - if (rootContext) { - startProfiling(samplingProfiler); - } else { - CallTree.Root root = samplingProfiler.profiledThreads.get(threadId); - if (root != null) { - if (logger.isLoggable(Level.FINE)) { - logger.log(Level.FINE, "Handling activation for thread {0}", threadId); - } - root.onActivation(traceContextBuffer, timestamp); - } else if (logger.isLoggable(Level.FINE)) { - logger.log( - Level.FINE, - "Illegal state when handling activation event for thread {0}: no root found for this thread", - threadId); - } - } - } - - private void startProfiling(SamplingProfiler samplingProfiler) { - CallTree.Root root = - CallTree.createRoot(samplingProfiler.rootPool, traceContextBuffer, timestamp); - if (logger.isLoggable(Level.FINE)) { - logger.log( - Level.FINE, - "Create call tree ({0}) for thread {1}", - new Object[] {deserialize(samplingProfiler, traceContextBuffer), threadId}); - } - - CallTree.Root orphaned = samplingProfiler.profiledThreads.put(threadId, root); - if (orphaned != null) { - if (logger.isLoggable(Level.FINE)) { - logger.log( - Level.FINE, - "Illegal state when stopping profiling for thread {0}: orphaned root", - threadId); - } - orphaned.recycle(samplingProfiler.callTreePool, samplingProfiler.rootPool); - } - } - - private TraceContext deserialize(SamplingProfiler samplingProfiler, byte[] traceContextBuffer) { - samplingProfiler.contextForLogging.deserialize(traceContextBuffer); - return samplingProfiler.contextForLogging; - } - - private void handleDeactivationEvent(SamplingProfiler samplingProfiler) { - if (rootContext) { - stopProfiling(samplingProfiler); - } else { - CallTree.Root root = samplingProfiler.profiledThreads.get(threadId); - if (root != null) { - if (logger.isLoggable(Level.FINE)) { - logger.log(Level.FINE, "Handling deactivation for thread {0}", threadId); - } - root.onDeactivation(traceContextBuffer, previousContextBuffer, timestamp); - } else if (logger.isLoggable(Level.FINE)) { - logger.log( - Level.FINE, - "Illegal state when handling deactivation event for thread {0}: no root found for this thread", - threadId); - } - } - } - - private void stopProfiling(SamplingProfiler samplingProfiler) { - CallTree.Root 
callTree = samplingProfiler.profiledThreads.get(threadId); - if (callTree != null && callTree.getRootContext().traceIdAndIdEquals(traceContextBuffer)) { - if (logger.isLoggable(Level.FINE)) { - logger.log( - Level.FINE, - "End call tree ({0}) for thread {1}", - new Object[] {deserialize(samplingProfiler, traceContextBuffer), threadId}); - } - samplingProfiler.profiledThreads.remove(threadId); - try { - callTree.end( - samplingProfiler.callTreePool, samplingProfiler.getInferredSpansMinDurationNs()); - int createdSpans = - callTree.spanify(samplingProfiler.getClock(), samplingProfiler.tracerProvider.get()); - if (logger.isLoggable(Level.FINE)) { - if (createdSpans > 0) { - logger.log( - Level.FINE, - "Created spans ({0}) for thread {1}", - new Object[] {createdSpans, threadId}); - } else { - logger.log( - Level.FINE, - "Created no spans for thread {0} (count={1})", - new Object[] {threadId, callTree.getCount()}); - } - } - } finally { - callTree.recycle(samplingProfiler.callTreePool, samplingProfiler.rootPool); - } - } - } - - public void serialize(ByteBuffer buf) { - buf.putLong(timestamp); - buf.put(traceContextBuffer); - buf.put(previousContextBuffer); - buf.put(rootContext ? (byte) 1 : (byte) 0); - buf.putLong(threadId); - buf.put(activation ? (byte) 1 : (byte) 0); - } - - public void deserialize(ByteBuffer buf) { - timestamp = buf.getLong(); - buf.get(traceContextBuffer); - buf.get(previousContextBuffer); - rootContext = buf.get() == 1; - threadId = buf.getLong(); - activation = buf.get() == 1; - } - } - - /** - * Does not wait but immediately returns the highest sequence which is available for read We never - * want to wait until new elements are available, we just want to process all available events - */ - private static class NoWaitStrategy implements WaitStrategy { - - @Override - public long waitFor( - long sequence, Sequence cursor, Sequence dependentSequence, SequenceBarrier barrier) { - return dependentSequence.get(); - } - - @Override - public void signalAllWhenBlocking() {} - } - - // extracting to a class instead of instantiating an anonymous inner class makes a huge difference - // in allocations - private class WriteActivationEventToFileHandler implements EventPoller.Handler { - @Override - public boolean onEvent(ActivationEvent event, long sequence, boolean endOfBatch) - throws IOException { - if (endOfBatch) { - SamplingProfiler.this.sequence.set(sequence); - } - if (activationEventsFileChannel.size() < MAX_ACTIVATION_EVENTS_FILE_SIZE) { - event.serialize(activationEventsBuffer); - if (!activationEventsBuffer.hasRemaining()) { - flushActivationEvents(); - } - return true; - } - return false; - } - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/SpanAnchoredClock.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/SpanAnchoredClock.java deleted file mode 100644 index 243add58..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/SpanAnchoredClock.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
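For orientation while reading the removed SamplingProfiler: the ActivationEvent above is persisted as a fixed-width record of 8 (timestamp) + 42 (trace context) + 42 (previous context) + 1 (root flag) + 8 (thread id) + 1 (activation flag) = 102 bytes, exactly as its SERIALIZED_SIZE constant spells out. The stand-alone sketch below only illustrates that round trip; the class name and the example values are invented for the illustration and are not part of the codebase.

    import java.nio.ByteBuffer;

    // Illustrative round trip of the fixed-width activation-event layout (102 bytes per event).
    class ActivationEventLayoutSketch {
      static final int TRACE_CONTEXT_LENGTH = 42; // matches TraceContext.SERIALIZED_LENGTH
      static final int SERIALIZED_SIZE = 8 + TRACE_CONTEXT_LENGTH + TRACE_CONTEXT_LENGTH + 1 + 8 + 1;

      public static void main(String[] args) {
        byte[] traceContext = new byte[TRACE_CONTEXT_LENGTH];    // would hold a serialized TraceContext
        byte[] previousContext = new byte[TRACE_CONTEXT_LENGTH]; // context active before the activation

        ByteBuffer buf = ByteBuffer.allocate(SERIALIZED_SIZE);
        buf.putLong(System.nanoTime());               // timestamp
        buf.put(traceContext);                        // serialized trace context
        buf.put(previousContext);                     // serialized previous context
        buf.put((byte) 1);                            // rootContext flag
        buf.putLong(Thread.currentThread().getId());  // threadId
        buf.put((byte) 0);                            // activation flag (0 = deactivation)

        buf.flip();                                   // read it back in the same order
        long timestamp = buf.getLong();
        buf.get(traceContext);
        buf.get(previousContext);
        boolean rootContext = buf.get() == 1;
        long threadId = buf.getLong();
        boolean activation = buf.get() == 1;
        System.out.println(timestamp + " root=" + rootContext + " tid=" + threadId + " activation=" + activation);
      }
    }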
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import co.elastic.otel.common.WeakConcurrent; -import com.blogspot.mydailyjava.weaklockfree.WeakConcurrentMap; -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.context.Context; -import io.opentelemetry.sdk.trace.ReadWriteSpan; - -public class SpanAnchoredClock { - private final WeakConcurrentMap<Span, Long> nanoTimeOffsetMap = WeakConcurrent.createMap(); - - public void onSpanStart(ReadWriteSpan started, Context parentContext) { - Span parent = Span.fromContext(parentContext); - Long parentAnchor = parent == null ? null : nanoTimeOffsetMap.get(parent); - if (parentAnchor != null) { - nanoTimeOffsetMap.put(started, parentAnchor); - } else { - long spanLatency = started.getLatencyNanos(); - long clockNowNanos = nanoTime(); - long spanStartNanos = started.toSpanData().getStartEpochNanos(); - long anchor = spanStartNanos - spanLatency - clockNowNanos; - nanoTimeOffsetMap.put(started, anchor); - } - } - - public long nanoTime() { - return System.nanoTime(); - } - - /** - * Returns a value which allows translating timestamps obtained via {@link #nanoTime()} to - * absolute epoch timestamps based on the start-time of the given span. - * - *
<p>
This anchor value can be used in {@link #toEpochNanos(long, long)} to perform the - * translation. - */ - public long getAnchor(Span span) { - return nanoTimeOffsetMap.get(span); - } - - /** - * Translates a timestamp obtained via {@link #nanoTime()} with the help of an anchor obtaines via - * {@link #getAnchor(Span)} to an absolute nano-precision epoch timestamp. - */ - public long toEpochNanos(long anchor, long recordedNanoTime) { - return recordedNanoTime + anchor; - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/StackFrame.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/StackFrame.java deleted file mode 100644 index c58c40c9..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/StackFrame.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import java.util.Objects; -import javax.annotation.Nullable; - -public class StackFrame { - @Nullable private final String className; - private final String methodName; - - public static StackFrame of(@Nullable String className, String methodName) { - return new StackFrame(className, methodName); - } - - public StackFrame(@Nullable String className, String methodName) { - this.className = className; - this.methodName = methodName; - } - - @Nullable - public String getClassName() { - return className; - } - - public String getMethodName() { - return methodName; - } - - public int getSimpleClassNameOffset() { - if (className != null) { - return className.lastIndexOf('.') + 1; - } - return 0; - } - - public void appendFileName(StringBuilder replaceBuilder) { - final String unknownCodeSource = ""; - if (className != null) { - int fileNameEnd = className.indexOf('$'); - if (fileNameEnd < 0) { - fileNameEnd = className.length(); - } - int classNameStart = className.lastIndexOf('.'); - if (classNameStart < fileNameEnd && fileNameEnd <= className.length()) { - replaceBuilder.append(className, classNameStart + 1, fileNameEnd); - replaceBuilder.append(".java"); - } else { - replaceBuilder.append(unknownCodeSource); - } - } else { - replaceBuilder.append(unknownCodeSource); - } - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - StackFrame that = (StackFrame) o; - - if (!Objects.equals(className, that.className)) { - return false; - } - return methodName.equals(that.methodName); - } - - @Override - public int hashCode() { - int result = className != null ? className.hashCode() : 0; - result = 31 * result + methodName.hashCode(); - return result; - } - - @Override - public String toString() { - if (className == null) { - return methodName; - } - return className + '.' 
+ methodName; - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/ThreadMatcher.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/ThreadMatcher.java deleted file mode 100644 index 9b1e1dff..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/ThreadMatcher.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -public class ThreadMatcher { - - private final ThreadGroup systemThreadGroup; - private Thread[] threads = new Thread[16]; - - public ThreadMatcher() { - ThreadGroup threadGroup = Thread.currentThread().getThreadGroup(); - while (threadGroup.getParent() != null) { - threadGroup = threadGroup.getParent(); - } - systemThreadGroup = threadGroup; - } - - public void forEachThread( - NonCapturingPredicate predicate, - S1 state1, - NonCapturingConsumer consumer, - S2 state2) { - int count = systemThreadGroup.activeCount(); - do { - int expectedArrayLength = count + (count / 2) + 1; - if (threads.length < expectedArrayLength) { - threads = new Thread[expectedArrayLength]; // slightly grow the array size - } - count = systemThreadGroup.enumerate(threads, true); - // return value of enumerate() must be strictly less than the array size according to javadoc - } while (count >= threads.length); - - for (int i = 0; i < count; i++) { - Thread thread = threads[i]; - if (predicate.test(thread, state1)) { - consumer.accept(thread, state2); - } - threads[i] = null; - } - } - - interface NonCapturingPredicate { - boolean test(T t, S state); - } - - interface NonCapturingConsumer { - void accept(T t, S state); - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/TraceContext.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/TraceContext.java deleted file mode 100644 index 0c0b0ad3..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/TraceContext.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
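The clock-anchor scheme documented in the removed SpanAnchoredClock above reduces to one additive offset per local trace root: the anchor is computed once when the span starts, and every later System.nanoTime() sample is shifted by it to obtain an absolute epoch timestamp. A small stand-alone sketch of that arithmetic, with placeholder values rather than real agent data:

    // Stand-alone sketch of the anchor arithmetic; values are placeholders.
    class ClockAnchorSketch {
      public static void main(String[] args) {
        long spanStartEpochNanos = 1_700_000_000_000_000_000L; // span start as epoch nanos (example)
        long spanLatencyNanos = 250_000L;                      // nanos the span had been running at registration
        long clockNowNanos = System.nanoTime();                // monotonic reading taken at registration

        // Mirrors onSpanStart above: a single anchor value is remembered for the span.
        long anchor = spanStartEpochNanos - spanLatencyNanos - clockNowNanos;

        // A profiler sample later records a plain System.nanoTime() value ...
        long recordedNanoTime = System.nanoTime();
        // ... and, as in toEpochNanos, adding the anchor turns it into an absolute epoch timestamp.
        long epochNanos = recordedNanoTime + anchor;
        System.out.println(epochNanos);
      }
    }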
- */ -package co.elastic.otel.profiler; - -import co.elastic.otel.common.util.HexUtils; -import co.elastic.otel.profiler.pooling.Recyclable; -import co.elastic.otel.profiler.util.ByteUtils; -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.api.trace.SpanContext; -import io.opentelemetry.api.trace.TraceFlags; -import io.opentelemetry.api.trace.TraceState; -import io.opentelemetry.sdk.trace.ReadableSpan; -import javax.annotation.Nullable; - -/** - * A mutable (and therefore recyclable) class storing the relevant bits of {@link SpanContext} for - * generating inferred spans. Also stores a clock-anchor for the corresponding span obtained via - * {@link SpanAnchoredClock#getAnchor(Span)}. - */ -public class TraceContext implements Recyclable { - - public static final int SERIALIZED_LENGTH = 16 + 8 + 1 + 1 + 8 + 8; - private long traceIdLow; - private long traceIdHigh; - private long id; - - private boolean hasParentId; - private long parentId; - private byte flags; - - private long clockAnchor; - - public TraceContext() {} - - // For testing only - static TraceContext fromSpanContextWithZeroClockAnchor( - SpanContext ctx, @Nullable String parentSpanId) { - TraceContext result = new TraceContext(); - result.fillFromSpanContext(ctx, parentSpanId); - result.clockAnchor = 0L; - return result; - } - - private void fillFromSpanContext(SpanContext ctx, @Nullable String parentSpanId) { - id = HexUtils.hexToLong(ctx.getSpanId(), 0); - traceIdHigh = HexUtils.hexToLong(ctx.getTraceId(), 0); - traceIdLow = HexUtils.hexToLong(ctx.getTraceId(), 16); - if (parentSpanId != null) { - hasParentId = true; - parentId = HexUtils.hexToLong(parentSpanId, 0); - } else { - hasParentId = false; - } - flags = ctx.getTraceFlags().asByte(); - } - - public SpanContext toOtelSpanContext(StringBuilder temporaryBuilder) { - temporaryBuilder.setLength(0); - HexUtils.appendLongAsHex(traceIdHigh, temporaryBuilder); - HexUtils.appendLongAsHex(traceIdLow, temporaryBuilder); - String traceIdStr = temporaryBuilder.toString(); - - temporaryBuilder.setLength(0); - HexUtils.appendLongAsHex(id, temporaryBuilder); - String idStr = temporaryBuilder.toString(); - - return SpanContext.create( - traceIdStr, idStr, TraceFlags.fromByte(flags), TraceState.getDefault()); - } - - public long getSpanId() { - return id; - } - - public boolean idEquals(@Nullable TraceContext o) { - if (o == null) { - return false; - } - return id == o.id; - } - - public static long getSpanId(byte[] serialized) { - return ByteUtils.getLong(serialized, 16); - } - - public void deserialize(byte[] serialized) { - traceIdLow = ByteUtils.getLong(serialized, 0); - traceIdHigh = ByteUtils.getLong(serialized, 8); - id = ByteUtils.getLong(serialized, 16); - flags = serialized[24]; - hasParentId = serialized[25] != 0; - parentId = ByteUtils.getLong(serialized, 26); - clockAnchor = ByteUtils.getLong(serialized, 34); - } - - public static long getParentId(byte[] serializedTraceContext) { - boolean hasParent = serializedTraceContext[25] != 0; - if (!hasParent) { - return 0L; - } - return ByteUtils.getLong(serializedTraceContext, 26); - } - - public boolean traceIdAndIdEquals(byte[] otherSerialized) { - long otherTraceIdLow = ByteUtils.getLong(otherSerialized, 0); - if (otherTraceIdLow != traceIdLow) { - return false; - } - long otherTraceIdHigh = ByteUtils.getLong(otherSerialized, 8); - if (otherTraceIdHigh != traceIdHigh) { - return false; - } - long otherId = ByteUtils.getLong(otherSerialized, 16); - return id == otherId; - } - - public static void serialize(Span 
span, long clockAnchor, byte[] buffer) { - SpanContext ctx = span.getSpanContext(); - SpanContext parentSpanCtx = SpanContext.getInvalid(); - if (span instanceof ReadableSpan) { - parentSpanCtx = ((ReadableSpan) span).getParentSpanContext(); - } - - long id = HexUtils.hexToLong(ctx.getSpanId(), 0); - long traceIdHigh = HexUtils.hexToLong(ctx.getTraceId(), 0); - long traceIdLow = HexUtils.hexToLong(ctx.getTraceId(), 16); - byte flags = ctx.getTraceFlags().asByte(); - ByteUtils.putLong(buffer, 0, traceIdLow); - ByteUtils.putLong(buffer, 8, traceIdHigh); - ByteUtils.putLong(buffer, 16, id); - buffer[24] = flags; - if (parentSpanCtx.isValid()) { - buffer[25] = 1; - ByteUtils.putLong(buffer, 26, HexUtils.hexToLong(parentSpanCtx.getSpanId(), 0)); - } else { - buffer[25] = 0; - ByteUtils.putLong(buffer, 26, 0); - } - ByteUtils.putLong(buffer, 34, clockAnchor); - } - - public void serialize(byte[] buffer) { - ByteUtils.putLong(buffer, 0, traceIdLow); - ByteUtils.putLong(buffer, 8, traceIdHigh); - ByteUtils.putLong(buffer, 16, id); - buffer[24] = flags; - if (hasParentId) { - buffer[25] = 1; - ByteUtils.putLong(buffer, 26, parentId); - } else { - buffer[25] = 0; - ByteUtils.putLong(buffer, 26, 0); - } - ByteUtils.putLong(buffer, 34, clockAnchor); - } - - public byte[] serialize() { - byte[] result = new byte[SERIALIZED_LENGTH]; - serialize(result); - return result; - } - - @Override - public void resetState() { - traceIdLow = 0; - traceIdHigh = 0; - id = 0; - flags = 0; - clockAnchor = 0; - } - - public long getClockAnchor() { - return clockAnchor; - } - - @Override - public String toString() { - StringBuilder result = new StringBuilder(); - SpanContext otelSpanCtx = toOtelSpanContext(result); - result.setLength(0); - result.append(otelSpanCtx).append("(clock-anchor: ").append(clockAnchor).append(')'); - return result.toString(); - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/BufferedFile.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/BufferedFile.java deleted file mode 100644 index 33014888..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/BufferedFile.java +++ /dev/null @@ -1,442 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
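The removed TraceContext packs everything needed to later recreate a SpanContext into a fixed 42-byte array; the offsets below simply restate what its serialize method writes. This is an illustrative walk-through with made-up values, not code from the repository:

    import java.nio.ByteBuffer;

    // The 42-byte layout written by TraceContext.serialize (offsets in brackets).
    class TraceContextLayoutSketch {
      public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(42);       // TraceContext.SERIALIZED_LENGTH
        buf.putLong(0, 0x1122334455667788L);            // [0..7]   traceIdLow
        buf.putLong(8, 0x0102030405060708L);            // [8..15]  traceIdHigh
        buf.putLong(16, 0x0123456789ABCDEFL);           // [16..23] span id
        buf.put(24, (byte) 1);                          // [24]     trace flags (sampled)
        buf.put(25, (byte) 0);                          // [25]     hasParentId flag
        buf.putLong(26, 0L);                            // [26..33] parent span id (0 when absent)
        buf.putLong(34, 0L);                            // [34..41] clock anchor
        System.out.println("serialized length = " + buf.capacity());
      }
    }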
- */ -package co.elastic.otel.profiler.asyncprofiler; - -import co.elastic.otel.profiler.pooling.Recyclable; -import java.io.File; -import java.io.IOException; -import java.nio.Buffer; -import java.nio.ByteBuffer; -import java.nio.MappedByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.charset.StandardCharsets; -import java.nio.file.StandardOpenOption; -import javax.annotation.Nullable; - -/** - * An abstraction similar to {@link MappedByteBuffer} that allows to read the content of a file with - * an API that is similar to {@link ByteBuffer}. - * - *
<p>
Instances of this class hold a reusable buffer that contains a subset of the file, or the - * whole file if the buffer's capacity is greater or equal to the file's size. - * - *
<p>
Whenever calling a method like {@link #getLong()} or {@link #position(long)} would exceed the - * currently buffered range the same buffer is filled with a different range of the file. - * - *
<p>
The downside of {@link MappedByteBuffer} (and the reason for implementing this abstraction) is - * that calling methods like {@link MappedByteBuffer#get()} can increase time-to-safepoint. This is - * because these methods are implemented as JVM intrinsics. When the JVM executes an intrinsic, it - * does not switch to the native execution context which means that it's not ready to enter a - * safepoint whenever a intrinsic runs. As reading a file from disk can get stuck (for example when - * the disk is busy) calling {@link MappedByteBuffer#get()} may take a while to execute. While it's - * executing other threads have to wait for it to finish if the JVM wants to reach a safe point. - */ -class BufferedFile implements Recyclable { - - private static final int SIZE_OF_BYTE = 1; - private static final int SIZE_OF_SHORT = 2; - private static final int SIZE_OF_INT = 4; - private static final int SIZE_OF_LONG = 8; - - // The following constant are defined by the JFR file format for identifying the string encoding - private static final int STRING_ENCODING_NULL = 0; - private static final int STRING_ENCODING_EMPTY = 1; - private static final int STRING_ENCODING_CONSTANTPOOL = 2; - private static final int STRING_ENCODING_UTF8 = 3; - private static final int STRING_ENCODING_CHARARRAY = 4; - private static final int STRING_ENCODING_LATIN1 = 5; - - private ByteBuffer buffer; - private final ByteBuffer bigBuffer; - private final ByteBuffer smallBuffer; - - /** The offset of the file from where the {@link #buffer} starts */ - private long offset; - - private boolean wholeFileInBuffer; - @Nullable private FileChannel fileChannel; - - /** - * @param bigBuffer the buffer to be used to read the whole file if the file fits into it - * @param smallBuffer the buffer to be used to read chunks of the file in case the file is larger - * than bigBuffer. Constantly seeking a file with a large buffer is very bad for performance. - */ - public BufferedFile(ByteBuffer bigBuffer, ByteBuffer smallBuffer) { - this.bigBuffer = bigBuffer; - this.smallBuffer = smallBuffer; - } - - /** - * Sets the file and depending on it's size, may read the file into the {@linkplain #buffer - * buffer} - * - * @param file the file to read from - * @throws IOException If some I/O error occurs - */ - public void setFile(File file) throws IOException { - fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ); - if (fileChannel.size() <= bigBuffer.capacity()) { - buffer = bigBuffer; - read(0, bigBuffer.capacity()); - wholeFileInBuffer = true; - } else { - buffer = smallBuffer; - Buffer buffer = this.buffer; - buffer.flip(); - } - } - - /** - * Returns the position of the file - * - * @return the position of the file - */ - public long position() { - return offset + buffer.position(); - } - - /** - * Skips the provided number of bytes in the file without reading new data. 
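The class javadoc above describes a windowed-read strategy: one reusable buffer, the file offset it currently starts at, and a refill from the FileChannel whenever a read falls outside the buffered range. A heavily stripped-down sketch of that idea, with an arbitrary 4 KiB window and invented names:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    // Minimal windowed reader: remembers which file offset the buffer starts at
    // and refills it when a requested byte lies outside the buffered window.
    class WindowedReaderSketch {
      private final FileChannel channel;
      private final ByteBuffer window = ByteBuffer.allocateDirect(4 * 1024);
      private long windowOffset; // file offset corresponding to window index 0

      WindowedReaderSketch(Path file) throws IOException {
        channel = FileChannel.open(file, StandardOpenOption.READ);
        window.limit(0); // force a refill on the first read
      }

      byte get(long fileOffset) throws IOException {
        long relative = fileOffset - windowOffset;
        if (relative < 0 || relative >= window.limit()) {
          refill(fileOffset); // requested byte is not buffered yet
          relative = 0;
        }
        return window.get((int) relative);
      }

      private void refill(long fileOffset) throws IOException {
        window.clear();
        channel.position(fileOffset);
        channel.read(window);
        window.flip();
        windowOffset = fileOffset;
      }
    }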
- * - * @param bytesToSkip the number of bytes to skip - */ - public void skip(int bytesToSkip) { - position(position() + bytesToSkip); - } - - public void skipString() throws IOException { - readOrSkipString(get(), null); - } - - /** - * @param output the buffer to place the string intro - * @return false, if the string to read is null, true otherwise - */ - @Nullable - public boolean readString(StringBuilder output) throws IOException { - byte encoding = get(); - if (encoding == 0) { // 0 encoding represents a null string - return false; - } - readOrSkipString(encoding, output); - return true; - } - - @Nullable - public String readString() throws IOException { - byte encoding = get(); - if (encoding == STRING_ENCODING_NULL) { - return null; - } - if (encoding == STRING_ENCODING_EMPTY) { - return ""; - } - StringBuilder output = new StringBuilder(); - readOrSkipString(encoding, output); - return output.toString(); - } - - private void readOrSkipString(byte encoding, @Nullable StringBuilder output) throws IOException { - switch (encoding) { - case STRING_ENCODING_NULL: - case STRING_ENCODING_EMPTY: - return; - case STRING_ENCODING_CONSTANTPOOL: - if (output != null) { - throw new IllegalStateException("Reading constant pool string is not supported"); - } - getVarLong(); - return; - case STRING_ENCODING_UTF8: - readOrSkipUtf8(output); - return; - case STRING_ENCODING_CHARARRAY: - throw new IllegalStateException("Char-array encoding is not supported by the parser yet"); - case STRING_ENCODING_LATIN1: - if (output != null) { - throw new IllegalStateException("Reading LATIN1 encoded string is not supported"); - } - skip(getVarInt()); - return; - default: - throw new IllegalStateException("Unknown string encoding type: " + encoding); - } - } - - private void readOrSkipUtf8(@Nullable StringBuilder output) throws IOException { - int len = getVarInt(); - if (output == null) { - skip(len); - return; - } - ensureRemaining(len, len); - - for (int i = 0; i < len; i++) { - byte hopefullyAscii = getUnsafe(); - if (hopefullyAscii > 0) { - output.append((char) hopefullyAscii); - } else { - // encountered non-ascii character: fallback to allocating and UTF8-decoding - position(position() - 1); // reset position before the just read byte - byte[] utf8Data = new byte[len - i]; - buffer.get(utf8Data); - output.append(new String(utf8Data, StandardCharsets.UTF_8)); - return; - } - } - } - - /** - * Sets the position of the file without reading new data. 
- * - * @param pos the new position - */ - public void position(long pos) { - Buffer buffer = this.buffer; - long positionDelta = pos - position(); - long newBufferPos = buffer.position() + positionDelta; - if (0 <= newBufferPos && newBufferPos <= buffer.limit()) { - buffer.position((int) newBufferPos); - } else { - // makes sure that the next ensureRemaining will load from file - buffer.position(0); - buffer.limit(0); - offset = pos; - } - } - - /** - * Ensures that the provided number of bytes are available in the {@linkplain #buffer buffer} - * - * @param minRemaining the number of bytes which are guaranteed to be available in the {@linkplain - * #buffer buffer} - * @throws IOException If some I/O error occurs - * @throws IllegalStateException If minRemaining is greater than the buffer's capacity - */ - public void ensureRemaining(int minRemaining) throws IOException { - ensureRemaining(minRemaining, buffer.capacity()); - } - - /** - * Ensures that the provided number of bytes are available in the {@linkplain #buffer buffer} - * - * @param minRemaining the number of bytes which are guaranteed to be available in the {@linkplain - * #buffer buffer} - * @param maxRead the max number of bytes to read from the file in case the buffer does currently - * not hold {@code minRemaining} bytes - * @throws IOException If some I/O error occurs - * @throws IllegalStateException If minRemaining is greater than the buffer's capacity - */ - public void ensureRemaining(int minRemaining, int maxRead) throws IOException { - if (wholeFileInBuffer) { - return; - } - if (minRemaining > buffer.capacity()) { - throw new IllegalStateException( - String.format( - "Length (%d) greater than buffer capacity (%d)", minRemaining, buffer.capacity())); - } - if (buffer.remaining() < minRemaining) { - read(position(), maxRead); - } - } - - /** - * Gets a byte from the current {@linkplain #position() position} of this file. If the {@linkplain - * #buffer buffer} does not fully contain this byte, loads another slice of the file into the - * buffer. - * - * @return The byte at the file's current position - * @throws IOException If some I/O error occurs - */ - public byte get() throws IOException { - ensureRemaining(SIZE_OF_BYTE); - return buffer.get(); - } - - /** - * Gets a short from the current {@linkplain #position() position} of this file. If the - * {@linkplain #buffer buffer} does not fully contain this short, loads another slice of the file - * into the buffer. - * - * @return The short at the file's current position - * @throws IOException If some I/O error occurs - */ - public short getShort() throws IOException { - ensureRemaining(SIZE_OF_SHORT); - return buffer.getShort(); - } - - /** - * Gets a short from the current {@linkplain #position() position} of this file. If the - * {@linkplain #buffer buffer} does not fully contain this short, loads another slice of the file - * into the buffer. - * - * @return The short at the file's current position - * @throws IOException If some I/O error occurs - */ - public int getUnsignedShort() throws IOException { - return getShort() & 0xffff; - } - - /** - * Gets a int from the current {@linkplain #position() position} of this file and converts it to - * an unsigned short. If the {@linkplain #buffer buffer} does not fully contain this int, loads - * another slice of the file into the buffer. 
- * - * @return The int at the file's current position - * @throws IOException If some I/O error occurs - */ - public int getInt() throws IOException { - ensureRemaining(SIZE_OF_INT); - return buffer.getInt(); - } - - /** - * Gets a long from the current {@linkplain #position() position} of this file. If the {@linkplain - * #buffer buffer} does not fully contain this long, loads another slice of the file into the - * buffer. - * - * @return The long at the file's current position - * @throws IOException If some I/O error occurs - */ - public long getLong() throws IOException { - ensureRemaining(SIZE_OF_LONG); - return buffer.getLong(); - } - - /** Reads LEB-128 variable length encoded values of a size of up to 64 bit. */ - public long getVarLong() throws IOException { - long value = 0; - boolean hasNext = true; - int shift = 0; - while (hasNext) { - long byteVal = ((int) get()); - hasNext = (byteVal & 0x80) != 0; - value |= (byteVal & 0x7F) << shift; - shift += 7; - } - return value; - } - - public int getVarInt() throws IOException { - long val = getVarLong(); - if ((int) val != val) { - throw new IllegalArgumentException("The LEB128 encoded value does not fit in an int"); - } - return (int) val; - } - - /** - * Gets a byte from the underlying buffer without checking if this part of the file is actually in - * the buffer. - * - *
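getVarLong() and getVarInt() above read the LEB128 variable-length integers that the JFR format uses throughout: each byte contributes 7 payload bits and its high bit says whether another byte follows. The writer below is not part of the removed code; it is only added so the round trip makes the bit layout concrete:

    import java.nio.ByteBuffer;

    // LEB128 round trip: 7 payload bits per byte, high bit = continuation.
    class Leb128Sketch {
      static void writeVarLong(ByteBuffer buf, long value) {
        while ((value & ~0x7FL) != 0) {
          buf.put((byte) ((value & 0x7F) | 0x80)); // more bytes follow
          value >>>= 7;
        }
        buf.put((byte) (value & 0x7F)); // final byte, continuation bit clear
      }

      static long readVarLong(ByteBuffer buf) {
        long value = 0;
        int shift = 0;
        boolean hasNext = true;
        while (hasNext) {
          long b = buf.get();               // sign-extended, hence the masking below
          hasNext = (b & 0x80) != 0;
          value |= (b & 0x7F) << shift;
          shift += 7;
        }
        return value;
      }

      public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(10);
        writeVarLong(buf, 300);               // encodes as 0xAC 0x02
        buf.flip();
        System.out.println(readVarLong(buf)); // prints 300
      }
    }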
<p>
Always make sure to call {@link #ensureRemaining} before. - * - * @return The byte at the file's current position - * @throws java.nio.BufferUnderflowException If the buffer's current position is not smaller than - * its limit - */ - public byte getUnsafe() { - return buffer.get(); - } - - /** - * Gets a short from the underlying buffer without checking if this part of the file is actually - * in the buffer. - * - *
<p>
Always make sure to call {@link #ensureRemaining} before. - * - * @return The short at the file's current position - * @throws java.nio.BufferUnderflowException If there are fewer than two bytes remaining in this - * buffer - */ - public short getUnsafeShort() { - return buffer.getShort(); - } - - /** - * Gets an int from the underlying buffer without checking if this part of the file is actually in - * the buffer. - * - *
<p>
Always make sure to call {@link #ensureRemaining} before. - * - * @return The int at the file's current position - * @throws java.nio.BufferUnderflowException If there are fewer than four bytes remaining in this - * buffer - */ - public int getUnsafeInt() { - return buffer.getInt(); - } - - /** - * Gets a long from the underlying buffer without checking if this part of the file is actually in - * the buffer. - * - *
<p>
Always mare sure to call {@link #ensureRemaining} before. - * - * @return The byte at the file's current position - * @throws java.nio.BufferUnderflowException If there are fewer than eight bytes remaining in this - * buffer - */ - public long getUnsafeLong() { - return buffer.getLong(); - } - - public long size() throws IOException { - if (fileChannel == null) { - throw new IllegalStateException("setFile has not been called yet"); - } - return fileChannel.size(); - } - - public boolean isSet() { - return fileChannel != null; - } - - @Override - public void resetState() { - if (fileChannel == null) { - throw new IllegalStateException("setFile has not been called yet"); - } - Buffer buffer = this.buffer; - buffer.clear(); - offset = 0; - wholeFileInBuffer = false; - try { - fileChannel.close(); - } catch (IOException ignore) { - } - fileChannel = null; - this.buffer = null; - } - - private void read(long offset, int limit) throws IOException { - if (limit > buffer.capacity()) { - limit = buffer.capacity(); - } - Buffer buffer = this.buffer; - buffer.clear(); - fileChannel.position(offset); - buffer.limit(limit); - fileChannel.read(this.buffer); - buffer.flip(); - this.offset = offset; - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/JfrParser.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/JfrParser.java deleted file mode 100644 index 0ee5172f..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/JfrParser.java +++ /dev/null @@ -1,499 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler.asyncprofiler; - -import co.elastic.otel.common.config.WildcardMatcher; -import co.elastic.otel.profiler.StackFrame; -import co.elastic.otel.profiler.collections.Int2IntHashMap; -import co.elastic.otel.profiler.collections.Int2ObjectHashMap; -import co.elastic.otel.profiler.collections.Long2LongHashMap; -import co.elastic.otel.profiler.collections.Long2ObjectHashMap; -import co.elastic.otel.profiler.pooling.Recyclable; -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.annotation.Nullable; - -/** - * Parses the binary JFR file created by async-profiler. May not work with JFR files created by an - * actual flight recorder. - * - *
<p>
The implementation is tuned to minimize allocations when parsing a JFR file. Most data - * structures can be reused by first {@linkplain #resetState() resetting the state} and then - * {@linkplain #parse(File, List, List) parsing} another file. - */ -public class JfrParser implements Recyclable { - - private static final Logger logger = Logger.getLogger(JfrParser.class.getName()); - - private static final byte[] MAGIC_BYTES = new byte[] {'F', 'L', 'R', '\0'}; - private static final Set<String> JAVA_FRAME_TYPES = - new HashSet<>(Arrays.asList("Interpreted", "JIT compiled", "Inlined")); - private static final int BIG_FILE_BUFFER_SIZE = 5 * 1024 * 1024; - private static final int SMALL_FILE_BUFFER_SIZE = 4 * 1024; - private static final String SYMBOL_EXCLUDED = "3x cluded"; - private static final String SYMBOL_NULL = "n u11"; - private static final StackFrame FRAME_EXCLUDED = new StackFrame("excluded", "excluded"); - private static final StackFrame FRAME_NULL = new StackFrame("null", "null"); - - private final BufferedFile bufferedFile; - private final Int2IntHashMap classIdToClassNameSymbolId = new Int2IntHashMap(-1); - private final Int2IntHashMap symbolIdToPos = new Int2IntHashMap(-1); - private final Int2ObjectHashMap<String> symbolIdToString = new Int2ObjectHashMap<>(); - private final Int2IntHashMap stackTraceIdToFilePositions = new Int2IntHashMap(-1); - private final Long2LongHashMap nativeTidToJavaTid = new Long2LongHashMap(-1); - private final Long2ObjectHashMap<StackFrame> methodIdToFrame = - new Long2ObjectHashMap<>(); - private final Long2LongHashMap methodIdToMethodNameSymbol = new Long2LongHashMap(-1); - private final Long2LongHashMap methodIdToClassId = new Long2LongHashMap(-1); - // used to resolve a symbol with minimal allocations - private final StringBuilder symbolBuilder = new StringBuilder(); - private long eventsFilePosition; - private long metadataFilePosition; - @Nullable private boolean[] isJavaFrameType; - @Nullable private List<WildcardMatcher> excludedClasses; - @Nullable private List<WildcardMatcher> includedClasses; - - public JfrParser() { - this( - ByteBuffer.allocateDirect(BIG_FILE_BUFFER_SIZE), - ByteBuffer.allocateDirect(SMALL_FILE_BUFFER_SIZE)); - } - - JfrParser(ByteBuffer bigBuffer, ByteBuffer smallBuffer) { - bufferedFile = new BufferedFile(bigBuffer, smallBuffer); - } - - /** - * Initializes the parser to make it ready for {@link #resolveStackTrace(long, List, int)} to be - * called.
- * - * @param file the JFR file to parse - * @param excludedClasses Class names to exclude in stack traces (has an effect on {@link - * #resolveStackTrace(long, List, int)}) - * @param includedClasses Class names to include in stack traces (has an effect on {@link - * #resolveStackTrace(long, List, int)}) - * @throws IOException if some I/O error occurs - */ - public void parse( - File file, List excludedClasses, List includedClasses) - throws IOException { - this.excludedClasses = excludedClasses; - this.includedClasses = includedClasses; - bufferedFile.setFile(file); - long fileSize = bufferedFile.size(); - - int chunkSize = readChunk(0); - if (chunkSize < fileSize) { - throw new IllegalStateException( - "This implementation does not support reading JFR files containing multiple chunks"); - } - } - - private int readChunk(int position) throws IOException { - bufferedFile.position(position); - if (logger.isLoggable(Level.FINE)) { - logger.log(Level.FINE, "Parsing JFR chunk at offset", new Object[] {position}); - } - for (byte magicByte : MAGIC_BYTES) { - if (bufferedFile.get() != magicByte) { - throw new IllegalArgumentException("Not a JFR file"); - } - } - short major = bufferedFile.getShort(); - short minor = bufferedFile.getShort(); - if (major != 2 || minor != 0) { - throw new IllegalArgumentException( - String.format("Can only parse version 2.0. Was %d.%d", major, minor)); - } - long chunkSize = bufferedFile.getLong(); - long constantPoolOffset = bufferedFile.getLong(); - metadataFilePosition = position + bufferedFile.getLong(); - bufferedFile.getLong(); // startTimeNanos - bufferedFile.getLong(); // durationNanos - bufferedFile.getLong(); // startTicks - bufferedFile.getLong(); // ticksPerSecond - bufferedFile.getInt(); // features - - // Events start right after metadata - eventsFilePosition = metadataFilePosition + parseMetadata(metadataFilePosition); - parseCheckpointEvents(position + constantPoolOffset); - return (int) chunkSize; - } - - private long parseMetadata(long metadataOffset) throws IOException { - bufferedFile.position(metadataOffset); - int size = bufferedFile.getVarInt(); - expectEventType(EventTypeId.EVENT_METADATA); - return size; - } - - private void expectEventType(int expectedEventType) throws IOException { - long eventType = bufferedFile.getVarLong(); - if (eventType != expectedEventType) { - throw new IOException("Expected " + expectedEventType + " but got " + eventType); - } - } - - private void parseCheckpointEvents(long checkpointOffset) throws IOException { - bufferedFile.position(checkpointOffset); - bufferedFile.getVarInt(); // size - expectEventType(EventTypeId.EVENT_CHECKPOINT); - bufferedFile.getVarLong(); // start - bufferedFile.getVarLong(); // duration - long delta = bufferedFile.getVarLong(); - if (delta != 0) { - throw new IllegalStateException( - "Expected only one checkpoint event, but file contained multiple, delta is " + delta); - } - bufferedFile.get(); // typeMask - long poolCount = bufferedFile.getVarLong(); - for (int i = 0; i < poolCount; i++) { - parseConstantPool(); - } - } - - private void parseConstantPool() throws IOException { - long typeId = bufferedFile.getVarLong(); - int count = bufferedFile.getVarInt(); - - switch ((int) typeId) { - case ContentTypeId.CONTENT_FRAME_TYPE: - readFrameTypeConstants(count); - break; - case ContentTypeId.CONTENT_THREAD_STATE: - case ContentTypeId.CONTENT_GC_WHEN: - case ContentTypeId.CONTENT_LOG_LEVELS: - // We are not interested in those types, but still have to consume the bytes - for (int i = 
0; i < count; i++) { - bufferedFile.getVarInt(); - bufferedFile.skipString(); - } - break; - case ContentTypeId.CONTENT_THREAD: - readThreadConstants(count); - break; - case ContentTypeId.CONTENT_STACKTRACE: - readStackTraceConstants(count); - break; - case ContentTypeId.CONTENT_METHOD: - readMethodConstants(count); - break; - case ContentTypeId.CONTENT_CLASS: - readClassConstants(count); - break; - case ContentTypeId.CONTENT_PACKAGE: - readPackageConstants(count); - break; - case ContentTypeId.CONTENT_SYMBOL: - readSymbolConstants(count); - break; - default: - throw new IllegalStateException("Unhandled constant pool type: " + typeId); - } - } - - private void readSymbolConstants(int count) throws IOException { - for (int i = 0; i < count; i++) { - int symbolId = bufferedFile.getVarInt(); - int pos = (int) bufferedFile.position(); - bufferedFile.skipString(); - symbolIdToPos.put(symbolId, pos); - symbolIdToString.put(symbolId, SYMBOL_NULL); - } - } - - private void readClassConstants(int count) throws IOException { - for (int i = 0; i < count; i++) { - int classId = bufferedFile.getVarInt(); - bufferedFile.getVarInt(); // classloader, always zero in async-profiler JFR files - int classNameSymbolId = bufferedFile.getVarInt(); - classIdToClassNameSymbolId.put(classId, classNameSymbolId); // class name - bufferedFile.getVarInt(); // package symbol id - bufferedFile.getVarInt(); // access flags - } - } - - private void readMethodConstants(int count) throws IOException { - for (int i = 0; i < count; i++) { - long id = bufferedFile.getVarLong(); - int classId = bufferedFile.getVarInt(); - // symbol ids are incrementing integers, no way there are more than 2 billion distinct - // ones - int methodNameSymbolId = bufferedFile.getVarInt(); - methodIdToFrame.put(id, FRAME_NULL); - methodIdToClassId.put(id, classId); - methodIdToMethodNameSymbol.put(id, methodNameSymbolId); - bufferedFile.getVarLong(); // signature - bufferedFile.getVarInt(); // modifiers - bufferedFile.get(); // hidden - } - } - - private void readPackageConstants(int count) throws IOException { - for (int i = 0; i < count; i++) { - bufferedFile.getVarLong(); // id - bufferedFile.getVarLong(); // symbol-id of package name - } - } - - private void readThreadConstants(int count) throws IOException { - for (int i = 0; i < count; i++) { - int nativeThreadId = bufferedFile.getVarInt(); - bufferedFile.skipString(); // native thread name - bufferedFile.getVarInt(); // native thread ID again - bufferedFile.skipString(); // java thread name - long javaThreadId = bufferedFile.getVarLong(); - if (javaThreadId != 0) { // javaThreadId will be null for native-only threads - nativeTidToJavaTid.put(nativeThreadId, javaThreadId); - } - } - } - - private void readStackTraceConstants(int count) throws IOException { - for (int i = 0; i < count; i++) { - - int stackTraceId = bufferedFile.getVarInt(); - bufferedFile.get(); // truncated byte, always zero anyway - - this.stackTraceIdToFilePositions.put(stackTraceId, (int) bufferedFile.position()); - // We need to skip the stacktrace to get to the position of the next one - readOrSkipStacktraceFrames(null, 0); - } - } - - private void readFrameTypeConstants(int count) throws IOException { - isJavaFrameType = new boolean[count]; - for (int i = 0; i < count; i++) { - int id = bufferedFile.getVarInt(); - if (i != id) { - throw new IllegalStateException("Expecting ids to be incrementing"); - } - isJavaFrameType[id] = JAVA_FRAME_TYPES.contains(bufferedFile.readString()); - } - } - - /** - * Invokes the callback 
for each stack trace event in the JFR file. - * - * @param callback called for each stack trace event - * @throws IOException if some I/O error occurs - */ - public void consumeStackTraces(StackTraceConsumer callback) throws IOException { - if (!bufferedFile.isSet()) { - throw new IllegalStateException("consumeStackTraces was called before parse"); - } - bufferedFile.position(eventsFilePosition); - long fileSize = bufferedFile.size(); - long eventStart = eventsFilePosition; - while (eventStart < fileSize) { - bufferedFile.position(eventStart); - int eventSize = bufferedFile.getVarInt(); - long eventType = bufferedFile.getVarLong(); - if (eventType == EventTypeId.EVENT_EXECUTION_SAMPLE) { - long nanoTime = bufferedFile.getVarLong(); - int tid = bufferedFile.getVarInt(); - int stackTraceId = bufferedFile.getVarInt(); - bufferedFile.getVarInt(); // thread state - long javaThreadId = nativeTidToJavaTid.get(tid); - callback.onCallTree(javaThreadId, stackTraceId, nanoTime); - } - eventStart += eventSize; - } - } - - /** - * Resolves the stack trace with the given {@code stackTraceId}. Only java frames will be - * included. - * - *
<p>
Note that this allocates strings for symbols in case a stack frame has not already been - * resolved for the current JFR file yet. These strings are currently not cached so this can - * create some GC pressure. - * - *
<p>
Excludes frames based on the {@link WildcardMatcher}s supplied to {@link #parse(File, List, - * List)}. - * - * @param stackTraceId The id of the stack traced. Used to look up the position of the file in - * which the given stack trace is stored via {@link #stackTraceIdToFilePositions}. - * @param stackFrames The mutable list where the stack frames are written to. Don't forget to - * {@link List#clear()} the list before calling this method if the list is reused. - * @param maxStackDepth The max size of the stackFrames list (excluded frames don't take up - * space). In contrast to async-profiler's {@code jstackdepth} argument this does not truncate - * the bottom of the stack, only the top. This is important to properly create a call tree - * without making it overly complex. - * @throws IOException if there is an error reading in current buffer - */ - public void resolveStackTrace(long stackTraceId, List stackFrames, int maxStackDepth) - throws IOException { - if (!bufferedFile.isSet()) { - throw new IllegalStateException("getStackTrace was called before parse"); - } - bufferedFile.position(stackTraceIdToFilePositions.get((int) stackTraceId)); - readOrSkipStacktraceFrames(stackFrames, maxStackDepth); - } - - private void readOrSkipStacktraceFrames(@Nullable List stackFrames, int maxStackDepth) - throws IOException { - int frameCount = bufferedFile.getVarInt(); - for (int i = 0; i < frameCount; i++) { - int methodId = bufferedFile.getVarInt(); - bufferedFile.getVarInt(); // line number - bufferedFile.getVarInt(); // bytecode index - byte type = bufferedFile.get(); - if (stackFrames != null) { - addFrameIfIncluded(stackFrames, methodId, type); - if (stackFrames.size() > maxStackDepth) { - stackFrames.remove(0); - } - } - } - } - - private void addFrameIfIncluded(List stackFrames, int methodId, byte frameType) - throws IOException { - if (isJavaFrameType(frameType)) { - StackFrame stackFrame = resolveStackFrame(methodId); - if (stackFrame != FRAME_EXCLUDED) { - stackFrames.add(stackFrame); - } - } - } - - private boolean isJavaFrameType(byte frameType) { - return isJavaFrameType[frameType]; - } - - private String resolveSymbol(int id, boolean classSymbol) throws IOException { - String symbol = symbolIdToString.get(id); - if (symbol != SYMBOL_NULL) { - return symbol; - } - - long previousPosition = bufferedFile.position(); - int position = symbolIdToPos.get(id); - bufferedFile.position(position); - symbolBuilder.setLength(0); - bufferedFile.readString(symbolBuilder); - bufferedFile.position(previousPosition); - - if (classSymbol) { - replaceSlashesWithDots(symbolBuilder); - } - - if (classSymbol && !isClassIncluded(symbolBuilder)) { - symbol = SYMBOL_EXCLUDED; - } else { - symbol = symbolBuilder.toString(); - } - symbolIdToString.put(id, symbol); - return symbol; - } - - private static void replaceSlashesWithDots(StringBuilder builder) { - for (int i = 0; i < builder.length(); i++) { - if (builder.charAt(i) == '/') { - builder.setCharAt(i, '.'); - } - } - } - - private boolean isClassIncluded(CharSequence className) { - return WildcardMatcher.isAnyMatch(includedClasses, className) - && WildcardMatcher.isNoneMatch(excludedClasses, className); - } - - private StackFrame resolveStackFrame(long frameId) throws IOException { - StackFrame stackFrame = methodIdToFrame.get(frameId); - if (stackFrame != FRAME_NULL) { - return stackFrame; - } - String className = - resolveSymbol(classIdToClassNameSymbolId.get((int) methodIdToClassId.get(frameId)), true); - if (className == SYMBOL_EXCLUDED) { - 
stackFrame = FRAME_EXCLUDED; - } else { - String method = resolveSymbol((int) methodIdToMethodNameSymbol.get(frameId), false); - stackFrame = new StackFrame(className, Objects.requireNonNull(method)); - } - methodIdToFrame.put(frameId, stackFrame); - return stackFrame; - } - - @Override - public void resetState() { - bufferedFile.resetState(); - eventsFilePosition = 0; - metadataFilePosition = 0; - isJavaFrameType = null; - classIdToClassNameSymbolId.clear(); - stackTraceIdToFilePositions.clear(); - methodIdToFrame.clear(); - methodIdToMethodNameSymbol.clear(); - methodIdToClassId.clear(); - symbolBuilder.setLength(0); - excludedClasses = null; - includedClasses = null; - symbolIdToPos.clear(); - symbolIdToString.clear(); - } - - public interface StackTraceConsumer { - - /** - * @param threadId The {@linkplain Thread#getId() Java thread id} for with the event was - * recorded. - * @param stackTraceId The id of the stack trace event. Can be used to resolve the stack trace - * via {@link #resolveStackTrace(long, List, int)} - * @param nanoTime The timestamp of the event which can be correlated with {@link - * System#nanoTime()} - * @throws IOException if there is any error reading stack trace - */ - void onCallTree(long threadId, long stackTraceId, long nanoTime) throws IOException; - } - - private interface EventTypeId { - int EVENT_METADATA = 0; - int EVENT_CHECKPOINT = 1; - - // The following event types actually are defined in the metadata of the JFR file itself - // for simplicity and performance, we hardcode the values used by the async-profiler - // implementation - int EVENT_EXECUTION_SAMPLE = 101; - } - - private interface ContentTypeId { - int CONTENT_THREAD = 22; - int CONTENT_LOG_LEVELS = 33; - int CONTENT_STACKTRACE = 26; - int CONTENT_CLASS = 21; - int CONTENT_METHOD = 28; - int CONTENT_SYMBOL = 31; - int CONTENT_THREAD_STATE = 25; - int CONTENT_FRAME_TYPE = 24; - int CONTENT_GC_WHEN = 32; - int CONTENT_PACKAGE = 30; - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/ResourceExtractionUtil.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/ResourceExtractionUtil.java deleted file mode 100644 index 97024407..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/asyncprofiler/ResourceExtractionUtil.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
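Pieced together from the method signatures above, the removed JfrParser was driven by parsing a single-chunk file once, then streaming the execution samples and resolving each stack lazily. The sketch below is a reconstruction for illustration only; it no longer compiles against this repository once these classes are gone, and the frame limit of 256 as well as the printing are placeholders:

    import co.elastic.otel.common.config.WildcardMatcher;
    import co.elastic.otel.profiler.StackFrame;
    import co.elastic.otel.profiler.asyncprofiler.JfrParser;
    import java.io.File;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical driver for the removed parser, based on its public methods.
    class JfrParserUsageSketch {
      static void dump(File jfrFile, List<WildcardMatcher> included, List<WildcardMatcher> excluded)
          throws IOException {
        JfrParser parser = new JfrParser();
        try {
          parser.parse(jfrFile, excluded, included);       // single-chunk async-profiler output
          List<StackFrame> frames = new ArrayList<>();
          parser.consumeStackTraces(
              (threadId, stackTraceId, nanoTime) -> {
                frames.clear();                            // the frame list is reused across events
                parser.resolveStackTrace(stackTraceId, frames, 256);
                System.out.println(nanoTime + " thread=" + threadId + " depth=" + frames.size());
              });
        } finally {
          parser.resetState();                             // releases the underlying file handle
        }
      }
    }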
- */ -package co.elastic.otel.profiler.asyncprofiler; - -import static java.nio.file.LinkOption.NOFOLLOW_LINKS; -import static java.nio.file.StandardOpenOption.CREATE_NEW; -import static java.nio.file.StandardOpenOption.READ; -import static java.nio.file.StandardOpenOption.WRITE; -import static java.nio.file.attribute.PosixFilePermission.OWNER_READ; -import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE; - -import java.io.IOException; -import java.io.InputStream; -import java.math.BigInteger; -import java.nio.channels.Channels; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.nio.file.FileAlreadyExistsException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.FileAttribute; -import java.nio.file.attribute.PosixFilePermissions; -import java.nio.file.attribute.UserPrincipal; -import java.security.DigestInputStream; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.EnumSet; - -public class ResourceExtractionUtil { - - /** - * Extracts a classpath resource to {@code - * ${System.getProperty("java.io.tmpdir")}/$prefix-$hash.$suffix}. If the file has already been - * extracted it will not be extracted again. - * - * @param resource The classpath resource to extract. - * @param prefix The prefix of the extracted file. - * @param suffix The suffix of the extracted file. - * @return the extracted file. - */ - public static synchronized Path extractResourceToTempDirectory( - String resource, String prefix, String suffix) { - return extractResourceToDirectory( - resource, prefix, suffix, Paths.get(System.getProperty("java.io.tmpdir"))); - } - - /** - * Extracts a classpath resource to {@code $directory/$prefix-$userHash-$hash.$suffix}. If the - * file has already been extracted it will not be extracted again. - * - * @param resource The classpath resource to extract. - * @param prefix The prefix of the extracted file. - * @param suffix The suffix of the extracted file. - * @param directory The directory in which the file is to be created, or null if the default - * temporary-file directory is to be used. - * @return the extracted file. - */ - /* - * Why it's synchronized : if the same JVM try to lock file, we got an java.nio.channels.OverlappingFileLockException. - * So we need to block until the file is totally written. - */ - public static synchronized Path extractResourceToDirectory( - String resource, String prefix, String suffix, Path directory) { - try (InputStream resourceStream = - ResourceExtractionUtil.class.getResourceAsStream("/" + resource)) { - if (resourceStream == null) { - throw new IllegalStateException(resource + " not found"); - } - UserPrincipal currentUserPrincipal = getCurrentUserPrincipal(); - // we have to include current user name as multiple copies of the same agent could be attached - // to multiple JVMs, each running under a different user. Hashing makes the name - // path-friendly. 
- String userHash = hash(currentUserPrincipal.getName()); - // to guard against re-using previous versions - String resourceHash = hash(ResourceExtractionUtil.class.getResourceAsStream("/" + resource)); - - Path tempFile = - directory.resolve( - prefix - + "-" - + userHash.substring(0, 32) - + "-" - + resourceHash.substring(0, 32) - + suffix); - try { - FileAttribute[] attr; - if (tempFile.getFileSystem().supportedFileAttributeViews().contains("posix")) { - attr = - new FileAttribute[] { - PosixFilePermissions.asFileAttribute(EnumSet.of(OWNER_WRITE, OWNER_READ)) - }; - } else { - attr = new FileAttribute[0]; - } - try (FileChannel channel = - FileChannel.open(tempFile, EnumSet.of(CREATE_NEW, WRITE), attr)) { - // make other JVM instances wait until fully written - try (FileLock writeLock = channel.lock()) { - channel.transferFrom(Channels.newChannel(resourceStream), 0, Long.MAX_VALUE); - } - } - } catch (FileAlreadyExistsException e) { - try (FileChannel channel = FileChannel.open(tempFile, READ, NOFOLLOW_LINKS)) { - // wait until other JVM instances have fully written the file - // multiple JVMs can read the file at the same time - try (FileLock readLock = channel.lock(0, Long.MAX_VALUE, true)) { - if (!hash(Files.newInputStream(tempFile)).equals(resourceHash)) { - throw new IllegalStateException( - "Invalid checksum of " + tempFile + ". Please delete this file."); - } else if (!Files.getOwner(tempFile).equals(currentUserPrincipal)) { - throw new IllegalStateException( - "File " - + tempFile - + " is not owned by '" - + currentUserPrincipal.getName() - + "'. Please delete this file."); - } - } - } - } - return tempFile.toAbsolutePath(); - } catch (NoSuchAlgorithmException | IOException e) { - throw new IllegalStateException(e); - } - } - - private static UserPrincipal getCurrentUserPrincipal() throws IOException { - Path whoami = Files.createTempFile("whoami", ".tmp"); - try { - return Files.getOwner(whoami); - } finally { - Files.delete(whoami); - } - } - - private static String hash(InputStream resourceAsStream) - throws IOException, NoSuchAlgorithmException { - try (InputStream is = resourceAsStream) { - MessageDigest md = MessageDigest.getInstance("SHA-256"); - byte[] buffer = new byte[1024]; - DigestInputStream dis = new DigestInputStream(is, md); - while (dis.read(buffer) != -1) {} - return new BigInteger(1, md.digest()).toString(16); - } - } - - private static String hash(String s) throws NoSuchAlgorithmException { - MessageDigest md = MessageDigest.getInstance("SHA-256"); - md.update(s.getBytes()); - return new BigInteger(1, md.digest()).toString(16); - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/CollectionUtil.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/CollectionUtil.java deleted file mode 100644 index e28a5794..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/CollectionUtil.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
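The naming scheme explained in the comments above keys the extracted file on a hash of the current OS user (separate copies per user) and a hash of the resource content (so copies from older agent versions are never reused). A small sketch of just that naming step; the "async-profiler" prefix and ".so" suffix are invented here, since the real values are passed in by the caller:

    import java.math.BigInteger;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Builds "<tmpdir>/<prefix>-<userHash>-<resourceHash><suffix>" as described above.
    class ExtractedFileNameSketch {
      static String sha256Hex(String s) throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        md.update(s.getBytes(StandardCharsets.UTF_8));
        return new BigInteger(1, md.digest()).toString(16); // same hex rendering as the deleted code
      }

      public static void main(String[] args) throws NoSuchAlgorithmException {
        String userHash = sha256Hex("alice");              // stands in for hash(currentUserPrincipal.getName())
        String resourceHash = sha256Hex("library-bytes");  // stands in for hashing the resource stream
        Path extracted =
            Paths.get(System.getProperty("java.io.tmpdir"))
                .resolve("async-profiler-" + userHash.substring(0, 32)
                    + "-" + resourceHash.substring(0, 32) + ".so");
        System.out.println(extracted);
      }
    }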
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Copyright 2014-2020 Real Logic Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package co.elastic.otel.profiler.collections; - -/** Utility functions for collection objects. */ -public class CollectionUtil { - /** - * Validate that a load factor is in the range of 0.1 to 0.9. - * - *
Load factors in the range 0.5 - 0.7 are recommended for open-addressing with linear probing. - * - * @param loadFactor to be validated. - */ - public static void validateLoadFactor(final float loadFactor) { - if (loadFactor < 0.1f || loadFactor > 0.9f) { - throw new IllegalArgumentException( - "load factor must be in the range of 0.1 to 0.9: " + loadFactor); - } - } - - /** - * Fast method of finding the next power of 2 greater than or equal to the supplied value. - * - *
If the value is <= 0 then 1 will be returned. - * - *
This method is not suitable for {@link Integer#MIN_VALUE} or numbers greater than 2^30. When - * provided then {@link Integer#MIN_VALUE} will be returned. - * - * @param value from which to search for next power of 2. - * @return The next power of 2 or the value itself if it is a power of 2. - */ - public static int findNextPositivePowerOfTwo(final int value) { - return 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(value - 1)); - } - - /** - * Fast method of finding the next power of 2 greater than or equal to the supplied value. - * - *
If the value is <= 0 then 1 will be returned. - * - *
This method is not suitable for {@link Long#MIN_VALUE} or numbers greater than 2^62. When - * provided then {@link Long#MIN_VALUE} will be returned. - * - * @param value from which to search for next power of 2. - * @return The next power of 2 or the value itself if it is a power of 2. - */ - public static long findNextPositivePowerOfTwo(final long value) { - return 1L << (Long.SIZE - Long.numberOfLeadingZeros(value - 1)); - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Hashing.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Hashing.java deleted file mode 100644 index 65fd9df5..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Hashing.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Copyright 2014-2020 Real Logic Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package co.elastic.otel.profiler.collections; - -/** Hashing functions for applying to integers. */ -public class Hashing { - /** Default load factor to be used in open addressing hashed data structures. */ - public static final float DEFAULT_LOAD_FACTOR = 0.55f; - - /** - * Generate a hash for an int value. This is a no op. - * - * @param value to be hashed. - * @return the hashed value. - */ - public static int hash(final int value) { - return value * 31; - } - - /** - * Generate a hash for an long value. - * - * @param value to be hashed. - * @return the hashed value. - */ - public static int hash(final long value) { - long hash = value * 31; - hash = (int) hash ^ (int) (hash >>> 32); - - return (int) hash; - } - - /** - * Generate a hash for a int value. - * - * @param value to be hashed. - * @param mask mask to be applied that must be a power of 2 - 1. - * @return the hash of the value. - */ - public static int hash(final int value, final int mask) { - final int hash = value * 31; - - return hash & mask; - } - - /** - * Generate a hash for a K value. - * - * @param is the type of value - * @param value to be hashed. - * @param mask mask to be applied that must be a power of 2 - 1. - * @return the hash of the value. 
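The CollectionUtil helpers deleted above size backing arrays by rounding a requested capacity up to the next power of two with a leading-zeros trick. A small standalone check of that formula, as a sketch:

```java
// Quick check of the bit trick used by the removed CollectionUtil:
// 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(value - 1)) rounds up to the
// next power of two, returning the value itself when it is already a power of two.
public class NextPowerOfTwoSketch {
  static int findNextPositivePowerOfTwo(int value) {
    return 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(value - 1));
  }

  public static void main(String[] args) {
    System.out.println(findNextPositivePowerOfTwo(8));    // 8  (already a power of two)
    System.out.println(findNextPositivePowerOfTwo(9));    // 16
    System.out.println(findNextPositivePowerOfTwo(1000)); // 1024
  }
}
```

The long variant in the removed class works the same way with Long.SIZE and Long.numberOfLeadingZeros.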
- */ - public static int hash(final K value, final int mask) { - final int hash = value.hashCode(); - - return hash & mask; - } - - /** - * Generate a hash for a long value. - * - * @param value to be hashed. - * @param mask mask to be applied that must be a power of 2 - 1. - * @return the hash of the value. - */ - public static int hash(final long value, final int mask) { - long hash = value * 31; - hash = (int) hash ^ (int) (hash >>> 32); - - return (int) hash & mask; - } - - /** - * Generate an even hash for a int value. - * - * @param value to be hashed. - * @param mask mask to be applied that must be a power of 2 - 1. - * @return the hash of the value which is always even. - */ - public static int evenHash(final int value, final int mask) { - final int hash = (value << 1) - (value << 8); - - return hash & mask; - } - - /** - * Generate an even hash for a long value. - * - * @param value to be hashed. - * @param mask mask to be applied that must be a power of 2 - 1. - * @return the hash of the value which is always even. - */ - public static int evenHash(final long value, final int mask) { - int hash = (int) value ^ (int) (value >>> 32); - hash = (hash << 1) - (hash << 8); - - return hash & mask; - } - - /** - * Combined two 32 bit keys into a 64-bit compound. - * - * @param keyPartA to make the upper bits - * @param keyPartB to make the lower bits. - * @return the compound key - */ - public static long compoundKey(final int keyPartA, final int keyPartB) { - return ((long) keyPartA << 32) | (keyPartB & 0xFFFF_FFFFL); - } - - public static int hashCode(long value) { - return (int) (value ^ (value >>> 32)); - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Int2IntHashMap.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Int2IntHashMap.java deleted file mode 100644 index 5014d772..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Int2IntHashMap.java +++ /dev/null @@ -1,876 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Copyright 2014-2020 Real Logic Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
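Two of the removed Hashing helpers are worth illustrating: compoundKey packs two 32-bit ints into one long, and evenHash always produces an even slot index, which the primitive maps below rely on because keys and values are interleaved in a single backing array (keys at even indices, values at odd ones). A short sketch:

```java
// Sketch of two helpers from the removed Hashing class.
public class HashingSketch {

  // Upper 32 bits from keyPartA, lower 32 bits from keyPartB.
  static long compoundKey(int keyPartA, int keyPartB) {
    return ((long) keyPartA << 32) | (keyPartB & 0xFFFF_FFFFL);
  }

  // (value << 1) - (value << 8) is always even, so masking with
  // entries.length - 1 (a power of two minus one) keeps the index even.
  static int evenHash(int value, int mask) {
    int hash = (value << 1) - (value << 8);
    return hash & mask;
  }

  public static void main(String[] args) {
    long key = compoundKey(7, -1);
    System.out.println((int) (key >>> 32)); // 7
    System.out.println((int) key);          // -1

    int mask = 31; // backing array of length 32
    for (int i = 0; i < 5; i++) {
      System.out.println(evenHash(i, mask) % 2); // always 0: even slots only
    }
  }
}
```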
- */ -package co.elastic.otel.profiler.collections; - -import java.io.Serializable; -import java.util.AbstractCollection; -import java.util.AbstractSet; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Objects; - -/** A open addressing with linear probing hash map specialised for primitive key and value pairs. */ -public class Int2IntHashMap implements Map, Serializable { - static final int MIN_CAPACITY = 8; - - private final float loadFactor; - private final int missingValue; - private int resizeThreshold; - private int size = 0; - private final boolean shouldAvoidAllocation; - - private int[] entries; - private KeySet keySet; - private ValueCollection values; - private EntrySet entrySet; - - public Int2IntHashMap(final int missingValue) { - this(MIN_CAPACITY, Hashing.DEFAULT_LOAD_FACTOR, missingValue); - } - - public Int2IntHashMap(final int initialCapacity, final float loadFactor, final int missingValue) { - this(initialCapacity, loadFactor, missingValue, true); - } - - /** - * @param initialCapacity for the map to override {@link #MIN_CAPACITY} - * @param loadFactor for the map to override {@link Hashing#DEFAULT_LOAD_FACTOR}. - * @param missingValue for the map that represents null. - * @param shouldAvoidAllocation should allocation be avoided by caching iterators and map entries. - */ - public Int2IntHashMap( - final int initialCapacity, - final float loadFactor, - final int missingValue, - final boolean shouldAvoidAllocation) { - CollectionUtil.validateLoadFactor(loadFactor); - - this.loadFactor = loadFactor; - this.missingValue = missingValue; - this.shouldAvoidAllocation = shouldAvoidAllocation; - - capacity(CollectionUtil.findNextPositivePowerOfTwo(Math.max(MIN_CAPACITY, initialCapacity))); - } - - /** - * The value to be used as a null marker in the map. - * - * @return value to be used as a null marker in the map. - */ - public int missingValue() { - return missingValue; - } - - /** - * Get the load factor applied for resize operations. - * - * @return the load factor applied for resize operations. - */ - public float loadFactor() { - return loadFactor; - } - - /** - * Get the total capacity for the map to which the load factor will be a fraction of. - * - * @return the total capacity for the map. - */ - public int capacity() { - return entries.length >> 2; - } - - /** - * Get the actual threshold which when reached the map will resize. This is a function of the - * current capacity and load factor. - * - * @return the threshold when the map will resize. - */ - public int resizeThreshold() { - return resizeThreshold; - } - - /** {@inheritDoc} */ - public int size() { - return size; - } - - /** {@inheritDoc} */ - public boolean isEmpty() { - return size == 0; - } - - public int get(final int key) { - final int[] entries = this.entries; - final int missingValue = this.missingValue; - final int mask = entries.length - 1; - int index = Hashing.evenHash(key, mask); - - int value = missingValue; - while (entries[index + 1] != missingValue) { - if (entries[index] == key) { - value = entries[index + 1]; - break; - } - - index = next(index, mask); - } - - return value; - } - - /** - * Put a key value pair in the map. 
- * - * @param key lookup key - * @param value new value, must not be initialValue - * @return current counter value associated with key, or initialValue if none found - * @throws IllegalArgumentException if value is missingValue - */ - public int put(final int key, final int value) { - if (value == missingValue) { - throw new IllegalArgumentException("cannot accept missingValue"); - } - - final int[] entries = this.entries; - final int missingValue = this.missingValue; - final int mask = entries.length - 1; - int index = Hashing.evenHash(key, mask); - int oldValue = missingValue; - - while (entries[index + 1] != missingValue) { - if (entries[index] == key) { - oldValue = entries[index + 1]; - break; - } - - index = next(index, mask); - } - - if (oldValue == missingValue) { - ++size; - entries[index] = key; - } - - entries[index + 1] = value; - - increaseCapacity(); - - return oldValue; - } - - private void increaseCapacity() { - if (size > resizeThreshold) { - // entries.length = 2 * capacity - final int newCapacity = entries.length; - rehash(newCapacity); - } - } - - private void rehash(final int newCapacity) { - final int[] oldEntries = entries; - final int missingValue = this.missingValue; - final int length = entries.length; - - capacity(newCapacity); - - final int[] newEntries = entries; - final int mask = entries.length - 1; - - for (int keyIndex = 0; keyIndex < length; keyIndex += 2) { - final int value = oldEntries[keyIndex + 1]; - if (value != missingValue) { - final int key = oldEntries[keyIndex]; - int index = Hashing.evenHash(key, mask); - - while (newEntries[index + 1] != missingValue) { - index = next(index, mask); - } - - newEntries[index] = key; - newEntries[index + 1] = value; - } - } - } - - /** - * Primitive specialised forEach implementation. - * - *
NB: Renamed from forEach to avoid overloading on parameter types of lambda expression, which - * doesn't play well with type inference in lambda expressions. - * - * @param consumer a callback called for each key/value pair in the map. - */ - public void intForEach(final IntIntConsumer consumer) { - final int[] entries = this.entries; - final int missingValue = this.missingValue; - final int length = entries.length; - - for (int keyIndex = 0; keyIndex < length; keyIndex += 2) { - if (entries[keyIndex + 1] != missingValue) // lgtm [java/index-out-of-bounds] - { - consumer.accept( - entries[keyIndex], entries[keyIndex + 1]); // lgtm [java/index-out-of-bounds] - } - } - } - - /** - * Int primitive specialised containsKey. - * - * @param key the key to check. - * @return true if the map contains key as a key, false otherwise. - */ - public boolean containsKey(final int key) { - return get(key) != missingValue; - } - - /** - * Does the map contain the value. - * - * @param value to be tested against contained values. - * @return true if contained otherwise value. - */ - public boolean containsValue(final int value) { - boolean found = false; - if (value != missingValue) { - final int[] entries = this.entries; - final int length = entries.length; - - for (int valueIndex = 1; valueIndex < length; valueIndex += 2) { - if (value == entries[valueIndex]) { - found = true; - break; - } - } - } - - return found; - } - - /** {@inheritDoc} */ - public void clear() { - if (size > 0) { - Arrays.fill(entries, missingValue); - size = 0; - } - } - - /** - * Compact the backing arrays by rehashing with a capacity just larger than current size and - * giving consideration to the load factor. - */ - public void compact() { - final int idealCapacity = (int) Math.round(size() * (1.0d / loadFactor)); - rehash(CollectionUtil.findNextPositivePowerOfTwo(Math.max(MIN_CAPACITY, idealCapacity))); - } - - // ---------------- Boxed Versions Below ---------------- - - /** {@inheritDoc} */ - public Integer get(final Object key) { - return valOrNull(get((int) key)); - } - - /** {@inheritDoc} */ - public Integer put(final Integer key, final Integer value) { - return valOrNull(put((int) key, (int) value)); - } - - /** {@inheritDoc} */ - public boolean containsKey(final Object key) { - return containsKey((int) key); - } - - /** {@inheritDoc} */ - public boolean containsValue(final Object value) { - return containsValue((int) value); - } - - /** {@inheritDoc} */ - public void putAll(final Map map) { - for (final Entry entry : map.entrySet()) { - put(entry.getKey(), entry.getValue()); - } - } - - /** {@inheritDoc} */ - public KeySet keySet() { - if (null == keySet) { - keySet = new KeySet(); - } - - return keySet; - } - - /** {@inheritDoc} */ - public ValueCollection values() { - if (null == values) { - values = new ValueCollection(); - } - - return values; - } - - /** {@inheritDoc} */ - public EntrySet entrySet() { - if (null == entrySet) { - entrySet = new EntrySet(); - } - - return entrySet; - } - - /** {@inheritDoc} */ - public Integer remove(final Object key) { - return valOrNull(remove((int) key)); - } - - public int remove(final int key) { - final int[] entries = this.entries; - final int missingValue = this.missingValue; - final int mask = entries.length - 1; - int keyIndex = Hashing.evenHash(key, mask); - - int oldValue = missingValue; - while (entries[keyIndex + 1] != missingValue) { - if (entries[keyIndex] == key) { - oldValue = entries[keyIndex + 1]; - entries[keyIndex + 1] = missingValue; - size--; - - 
compactChain(keyIndex); - - break; - } - - keyIndex = next(keyIndex, mask); - } - - return oldValue; - } - - @SuppressWarnings("FinalParameters") - private void compactChain(int deleteKeyIndex) { - final int[] entries = this.entries; - final int missingValue = this.missingValue; - final int mask = entries.length - 1; - int keyIndex = deleteKeyIndex; - - while (true) { - keyIndex = next(keyIndex, mask); - if (entries[keyIndex + 1] == missingValue) { - break; - } - - final int hash = Hashing.evenHash(entries[keyIndex], mask); - - if ((keyIndex < hash && (hash <= deleteKeyIndex || deleteKeyIndex <= keyIndex)) - || (hash <= deleteKeyIndex && deleteKeyIndex <= keyIndex)) { - entries[deleteKeyIndex] = entries[keyIndex]; - entries[deleteKeyIndex + 1] = entries[keyIndex + 1]; - - entries[keyIndex + 1] = missingValue; - deleteKeyIndex = keyIndex; - } - } - } - - /** - * Get the minimum value stored in the map. If the map is empty then it will return {@link - * #missingValue()} - * - * @return the minimum value stored in the map. - */ - public int minValue() { - final int missingValue = this.missingValue; - int min = size == 0 ? missingValue : Integer.MAX_VALUE; - - final int[] entries = this.entries; - final int length = entries.length; - - for (int valueIndex = 1; valueIndex < length; valueIndex += 2) { - final int value = entries[valueIndex]; - if (value != missingValue) { - min = Math.min(min, value); - } - } - - return min; - } - - /** - * Get the maximum value stored in the map. If the map is empty then it will return {@link - * #missingValue()} - * - * @return the maximum value stored in the map. - */ - public int maxValue() { - final int missingValue = this.missingValue; - int max = size == 0 ? missingValue : Integer.MIN_VALUE; - - final int[] entries = this.entries; - final int length = entries.length; - - for (int valueIndex = 1; valueIndex < length; valueIndex += 2) { - final int value = entries[valueIndex]; - if (value != missingValue) { - max = Math.max(max, value); - } - } - - return max; - } - - /** {@inheritDoc} */ - public String toString() { - if (isEmpty()) { - return "{}"; - } - - final EntryIterator entryIterator = new EntryIterator(); - entryIterator.reset(); - - final StringBuilder sb = new StringBuilder().append('{'); - while (true) { - entryIterator.next(); - sb.append(entryIterator.getIntKey()).append('=').append(entryIterator.getIntValue()); - if (!entryIterator.hasNext()) { - return sb.append('}').toString(); - } - sb.append(',').append(' '); - } - } - - /** - * Primitive specialised version of {@link #replace(Object, Object)} - * - * @param key key with which the specified value is associated - * @param value value to be associated with the specified key - * @return the previous value associated with the specified key, or {@link #missingValue()} if - * there was no mapping for the key. 
- */ - public int replace(final int key, final int value) { - int curValue = get(key); - if (curValue != missingValue) { - curValue = put(key, value); - } - - return curValue; - } - - /** - * Primitive specialised version of {@link #replace(Object, Object, Object)} - * - * @param key key with which the specified value is associated - * @param oldValue value expected to be associated with the specified key - * @param newValue value to be associated with the specified key - * @return {@code true} if the value was replaced - */ - public boolean replace(final int key, final int oldValue, final int newValue) { - final int curValue = get(key); - if (curValue != oldValue || curValue == missingValue) { - return false; - } - - put(key, newValue); - - return true; - } - - /** {@inheritDoc} */ - @SuppressWarnings("unchecked") - public boolean equals(final Object o) { - if (this == o) { - return true; - } - if (!(o instanceof Map)) { - return false; - } - - final Map that = (Map) o; - - return size == that.size() && entrySet().equals(that.entrySet()); - } - - public int hashCode() { - return entrySet().hashCode(); - } - - private static int next(final int index, final int mask) { - return (index + 2) & mask; - } - - private void capacity(final int newCapacity) { - final int entriesLength = newCapacity * 2; - if (entriesLength < 0) { - throw new IllegalStateException("max capacity reached at size=" + size); - } - - /*@DoNotSub*/ - resizeThreshold = (int) (newCapacity * loadFactor); - entries = new int[entriesLength]; - Arrays.fill(entries, missingValue); - } - - private Integer valOrNull(final int value) { - return value == missingValue ? null : value; - } - - // ---------------- Utility Classes ---------------- - - abstract class AbstractIterator implements Serializable { - protected boolean isPositionValid = false; - private int remaining; - private int positionCounter; - private int stopCounter; - - final void reset() { - isPositionValid = false; - remaining = Int2IntHashMap.this.size; - final int missingValue = Int2IntHashMap.this.missingValue; - final int[] entries = Int2IntHashMap.this.entries; - final int capacity = entries.length; - - int keyIndex = capacity; - if (entries[capacity - 1] != missingValue) { - keyIndex = 0; - for (; keyIndex < capacity; keyIndex += 2) { - if (entries[keyIndex + 1] == missingValue) // lgtm [java/index-out-of-bounds] - { - break; - } - } - } - - stopCounter = keyIndex; - positionCounter = keyIndex + capacity; - } - - protected final int keyPosition() { - return positionCounter & entries.length - 1; - } - - public int remaining() { - return remaining; - } - - public boolean hasNext() { - return remaining > 0; - } - - protected final void findNext() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - - final int[] entries = Int2IntHashMap.this.entries; - final int missingValue = Int2IntHashMap.this.missingValue; - final int mask = entries.length - 1; - - for (int keyIndex = positionCounter - 2; keyIndex >= stopCounter; keyIndex -= 2) { - final int index = keyIndex & mask; - if (entries[index + 1] != missingValue) { - isPositionValid = true; - positionCounter = keyIndex; - --remaining; - return; - } - } - - isPositionValid = false; - throw new IllegalStateException(); - } - - public void remove() { - if (isPositionValid) { - final int position = keyPosition(); - entries[position + 1] = missingValue; - --size; - - compactChain(position); - - isPositionValid = false; - } else { - throw new IllegalStateException(); - } - } - } - - /** Iterator over keys 
which supports access to unboxed keys. */ - public final class KeyIterator extends AbstractIterator implements Iterator { - public Integer next() { - return nextValue(); - } - - public int nextValue() { - findNext(); - - return entries[keyPosition()]; - } - } - - /** Iterator over values which supports access to unboxed values. */ - public final class ValueIterator extends AbstractIterator implements Iterator { - public Integer next() { - return nextValue(); - } - - public int nextValue() { - findNext(); - - return entries[keyPosition() + 1]; - } - } - - /** Iterator over entries which supports access to unboxed keys and values. */ - public final class EntryIterator extends AbstractIterator - implements Iterator>, Entry { - public Integer getKey() { - return getIntKey(); - } - - public int getIntKey() { - return entries[keyPosition()]; - } - - public Integer getValue() { - return getIntValue(); - } - - public int getIntValue() { - return entries[keyPosition() + 1]; - } - - public Integer setValue(final Integer value) { - return setValue(value.intValue()); - } - - public int setValue(final int value) { - if (!isPositionValid) { - throw new IllegalStateException(); - } - - if (missingValue == value) { - throw new IllegalArgumentException(); - } - - final int keyPosition = keyPosition(); - final int prevValue = entries[keyPosition + 1]; - entries[keyPosition + 1] = value; - return prevValue; - } - - public Entry next() { - findNext(); - - if (shouldAvoidAllocation) { - return this; - } - - return allocateDuplicateEntry(); - } - - private Entry allocateDuplicateEntry() { - final int k = getIntKey(); - final int v = getIntValue(); - - return new Entry() { - public Integer getKey() { - return k; - } - - public Integer getValue() { - return v; - } - - public Integer setValue(final Integer value) { - return Int2IntHashMap.this.put(k, value.intValue()); - } - - public int hashCode() { - return getIntKey() ^ getIntValue(); - } - - public boolean equals(final Object o) { - if (!(o instanceof Entry)) { - return false; - } - - final Entry e = (Entry) o; - - return (e.getKey() != null && e.getValue() != null) - && (e.getKey().equals(k) && e.getValue().equals(v)); - } - - public String toString() { - return k + "=" + v; - } - }; - } - - /** {@inheritDoc} */ - public int hashCode() { - return getIntKey() ^ getIntValue(); - } - - /** {@inheritDoc} */ - public boolean equals(final Object o) { - if (this == o) { - return true; - } - - if (!(o instanceof Entry)) { - return false; - } - - final Entry that = (Entry) o; - - return Objects.equals(getKey(), that.getKey()) && Objects.equals(getValue(), that.getValue()); - } - } - - /** Set of keys which supports optional cached iterators to avoid allocation. */ - public final class KeySet extends AbstractSet implements Serializable { - private final KeyIterator keyIterator = shouldAvoidAllocation ? 
new KeyIterator() : null; - - /** {@inheritDoc} */ - public KeyIterator iterator() { - KeyIterator keyIterator = this.keyIterator; - if (null == keyIterator) { - keyIterator = new KeyIterator(); - } - - keyIterator.reset(); - - return keyIterator; - } - - /** {@inheritDoc} */ - public int size() { - return Int2IntHashMap.this.size(); - } - - /** {@inheritDoc} */ - public boolean isEmpty() { - return Int2IntHashMap.this.isEmpty(); - } - - /** {@inheritDoc} */ - public void clear() { - Int2IntHashMap.this.clear(); - } - - /** {@inheritDoc} */ - public boolean contains(final Object o) { - return contains((int) o); - } - - public boolean contains(final int key) { - return containsKey(key); - } - } - - /** Collection of values which supports optionally cached iterators to avoid allocation. */ - public final class ValueCollection extends AbstractCollection { - private final ValueIterator valueIterator = shouldAvoidAllocation ? new ValueIterator() : null; - - /** {@inheritDoc} */ - public ValueIterator iterator() { - ValueIterator valueIterator = this.valueIterator; - if (null == valueIterator) { - valueIterator = new ValueIterator(); - } - - valueIterator.reset(); - - return valueIterator; - } - - /** {@inheritDoc} */ - public int size() { - return Int2IntHashMap.this.size(); - } - - /** {@inheritDoc} */ - public boolean contains(final Object o) { - return contains((int) o); - } - - public boolean contains(final int key) { - return containsValue(key); - } - } - - /** Set of entries which supports optionally cached iterators to avoid allocation. */ - public final class EntrySet extends AbstractSet> implements Serializable { - private final EntryIterator entryIterator = shouldAvoidAllocation ? new EntryIterator() : null; - - /** {@inheritDoc} */ - public EntryIterator iterator() { - EntryIterator entryIterator = this.entryIterator; - if (null == entryIterator) { - entryIterator = new EntryIterator(); - } - - entryIterator.reset(); - - return entryIterator; - } - - /** {@inheritDoc} */ - public int size() { - return Int2IntHashMap.this.size(); - } - - /** {@inheritDoc} */ - public boolean isEmpty() { - return Int2IntHashMap.this.isEmpty(); - } - - /** {@inheritDoc} */ - public void clear() { - Int2IntHashMap.this.clear(); - } - - /** {@inheritDoc} */ - public boolean contains(final Object o) { - final Entry entry = (Entry) o; - final Integer value = get(entry.getKey()); - - return value != null && value.equals(entry.getValue()); - } - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Int2ObjectHashMap.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Int2ObjectHashMap.java deleted file mode 100644 index 7749ee90..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Int2ObjectHashMap.java +++ /dev/null @@ -1,808 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
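Since Int2IntHashMap is dropped wholesale in favour of the upstream contrib extension, a brief usage sketch of the class as it existed before this change (it was copied from Real Logic's Agrona, per the copyright header above, where the same API remains available):

```java
import co.elastic.otel.profiler.collections.Int2IntHashMap;

// Usage sketch for the removed map: values stay primitive, and the configured
// missingValue doubles as the "not found" marker, so it can never be stored.
public class Int2IntHashMapSketch {
  public static void main(String[] args) {
    Int2IntHashMap counts = new Int2IntHashMap(-1); // -1 acts as the null marker

    counts.put(42, 3);
    System.out.println(counts.get(42));         // 3
    System.out.println(counts.get(7));          // -1 (missing)
    System.out.println(counts.containsKey(42)); // true

    counts.remove(42);
    System.out.println(counts.get(42));         // -1 again

    // counts.put(1, -1) would throw IllegalArgumentException ("cannot accept missingValue")
  }
}
```

The Long2LongHashMap and Long2ObjectHashMap deletions further down follow the same pattern for long keys.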
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Copyright 2014-2020 Real Logic Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package co.elastic.otel.profiler.collections; - -import static co.elastic.otel.profiler.collections.CollectionUtil.findNextPositivePowerOfTwo; -import static co.elastic.otel.profiler.collections.CollectionUtil.validateLoadFactor; -import static java.util.Objects.requireNonNull; - -import java.io.Serializable; -import java.util.AbstractCollection; -import java.util.AbstractSet; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Objects; - -/** - * {@link Map} implementation specialised for int keys using open addressing and linear probing for - * cache efficient access. - * - * @param type of values stored in the {@link Map} - */ -public class Int2ObjectHashMap implements Map, Serializable { - static final int MIN_CAPACITY = 8; - - private final float loadFactor; - private int resizeThreshold; - private int size; - private final boolean shouldAvoidAllocation; - - private int[] keys; - private Object[] values; - - private ValueCollection valueCollection; - private KeySet keySet; - private EntrySet entrySet; - - public Int2ObjectHashMap() { - this(MIN_CAPACITY, Hashing.DEFAULT_LOAD_FACTOR, true); - } - - public Int2ObjectHashMap(final int initialCapacity, final float loadFactor) { - this(initialCapacity, loadFactor, true); - } - - /** - * Construct a new map allowing a configuration for initial capacity and load factor. - * - * @param initialCapacity for the backing array - * @param loadFactor limit for resizing on puts - * @param shouldAvoidAllocation should allocation be avoided by caching iterators and map entries. - */ - public Int2ObjectHashMap( - final int initialCapacity, final float loadFactor, final boolean shouldAvoidAllocation) { - validateLoadFactor(loadFactor); - - this.loadFactor = loadFactor; - this.shouldAvoidAllocation = shouldAvoidAllocation; - - /* */ - final int capacity = findNextPositivePowerOfTwo(Math.max(MIN_CAPACITY, initialCapacity)); - /* */ - resizeThreshold = (int) (capacity * loadFactor); - - keys = new int[capacity]; - values = new Object[capacity]; - } - - /** - * Copy construct a new map from an existing one. - * - * @param mapToCopy for construction. - */ - public Int2ObjectHashMap(final Int2ObjectHashMap mapToCopy) { - this.loadFactor = mapToCopy.loadFactor; - this.resizeThreshold = mapToCopy.resizeThreshold; - this.size = mapToCopy.size; - this.shouldAvoidAllocation = mapToCopy.shouldAvoidAllocation; - - keys = mapToCopy.keys.clone(); - values = mapToCopy.values.clone(); - } - - /** - * Get the load factor beyond which the map will increase size. - * - * @return load factor for when the map should increase size. 
- */ - public float loadFactor() { - return loadFactor; - } - - /** - * Get the total capacity for the map to which the load factor will be a fraction of. - * - * @return the total capacity for the map. - */ - public int capacity() { - return values.length; - } - - /** - * Get the actual threshold which when reached the map will resize. This is a function of the - * current capacity and load factor. - * - * @return the threshold when the map will resize. - */ - public int resizeThreshold() { - return resizeThreshold; - } - - /** {@inheritDoc} */ - public int size() { - return size; - } - - /** {@inheritDoc} */ - public boolean isEmpty() { - return 0 == size; - } - - /** {@inheritDoc} */ - public boolean containsKey(final Object key) { - return containsKey(((Integer) key).intValue()); - } - - /** - * Overloaded version of {@link Map#containsKey(Object)} that takes a primitive int key. - * - * @param key for indexing the {@link Map} - * @return true if the key is found otherwise false. - */ - public boolean containsKey(final int key) { - final int mask = values.length - 1; - int index = Hashing.hash(key, mask); - - boolean found = false; - while (null != values[index]) { - if (key == keys[index]) { - found = true; - break; - } - - index = ++index & mask; - } - - return found; - } - - /** {@inheritDoc} */ - public boolean containsValue(final Object value) { - boolean found = false; - final Object val = mapNullValue(value); - if (null != val) { - for (final Object v : values) { - if (val.equals(v)) { - found = true; - break; - } - } - } - - return found; - } - - /** {@inheritDoc} */ - public V get(final Object key) { - return get(((Integer) key).intValue()); - } - - /** - * Overloaded version of {@link Map#get(Object)} that takes a primitive int key. - * - * @param key for indexing the {@link Map} - * @return the value if found otherwise null - */ - public V get(final int key) { - return unmapNullValue(getMapped(key)); - } - - @SuppressWarnings("unchecked") - protected V getMapped(final int key) { - final int mask = values.length - 1; - int index = Hashing.hash(key, mask); - - Object value; - while (null != (value = values[index])) { - if (key == keys[index]) { - break; - } - - index = ++index & mask; - } - - return (V) value; - } - - /** {@inheritDoc} */ - public V put(final Integer key, final V value) { - return put(key.intValue(), value); - } - - /** - * Overloaded version of {@link Map#put(Object, Object)} that takes a primitive int key. - * - * @param key for indexing the {@link Map} - * @param value to be inserted in the {@link Map} - * @return the previous value if found otherwise null - */ - @SuppressWarnings("unchecked") - public V put(final int key, final V value) { - final V val = (V) mapNullValue(value); - requireNonNull(val, "value cannot be null"); - - V oldValue = null; - final int mask = values.length - 1; - int index = Hashing.hash(key, mask); - - while (null != values[index]) { - if (key == keys[index]) { - oldValue = (V) values[index]; - break; - } - - index = ++index & mask; - } - - if (null == oldValue) { - ++size; - keys[index] = key; - } - - values[index] = val; - - if (size > resizeThreshold) { - increaseCapacity(); - } - - return unmapNullValue(oldValue); - } - - /** {@inheritDoc} */ - public V remove(final Object key) { - return remove(((Integer) key).intValue()); - } - - /** - * Overloaded version of {@link Map#remove(Object)} that takes a primitive int key. 
- * - * @param key for indexing the {@link Map} - * @return the value if found otherwise null - */ - public V remove(final int key) { - final int mask = values.length - 1; - int index = Hashing.hash(key, mask); - - Object value; - while (null != (value = values[index])) { - if (key == keys[index]) { - values[index] = null; - --size; - - compactChain(index); - break; - } - - index = ++index & mask; - } - - return unmapNullValue(value); - } - - /** {@inheritDoc} */ - public void clear() { - if (size > 0) { - Arrays.fill(values, null); - size = 0; - } - } - - /** - * Compact the {@link Map} backing arrays by rehashing with a capacity just larger than current - * size and giving consideration to the load factor. - */ - public void compact() { - final int idealCapacity = (int) Math.round(size() * (1.0d / loadFactor)); - rehash(findNextPositivePowerOfTwo(Math.max(MIN_CAPACITY, idealCapacity))); - } - - /** {@inheritDoc} */ - public void putAll(final Map map) { - for (final Entry entry : map.entrySet()) { - put(entry.getKey(), entry.getValue()); - } - } - - /** {@inheritDoc} */ - public KeySet keySet() { - if (null == keySet) { - keySet = new KeySet(); - } - - return keySet; - } - - /** {@inheritDoc} */ - public ValueCollection values() { - if (null == valueCollection) { - valueCollection = new ValueCollection(); - } - - return valueCollection; - } - - /** {@inheritDoc} */ - public EntrySet entrySet() { - if (null == entrySet) { - entrySet = new EntrySet(); - } - - return entrySet; - } - - /** {@inheritDoc} */ - public String toString() { - if (isEmpty()) { - return "{}"; - } - - final EntryIterator entryIterator = new EntryIterator(); - entryIterator.reset(); - - final StringBuilder sb = new StringBuilder().append('{'); - while (true) { - entryIterator.next(); - sb.append(entryIterator.getIntKey()) - .append('=') - .append(unmapNullValue(entryIterator.getValue())); - if (!entryIterator.hasNext()) { - return sb.append('}').toString(); - } - sb.append(',').append(' '); - } - } - - /** {@inheritDoc} */ - public boolean equals(final Object o) { - if (this == o) { - return true; - } - - if (!(o instanceof Map)) { - return false; - } - - final Map that = (Map) o; - - if (size != that.size()) { - return false; - } - - for (int i = 0, length = values.length; i < length; i++) { - final Object thisValue = values[i]; - if (null != thisValue) { - final Object thatValue = that.get(keys[i]); - if (!thisValue.equals(mapNullValue(thatValue))) { - return false; - } - } - } - - return true; - } - - /** {@inheritDoc} */ - public int hashCode() { - int result = 0; - - for (int i = 0, length = values.length; i < length; i++) { - final Object value = values[i]; - if (null != value) { - result += (keys[i] ^ value.hashCode()); - } - } - - return result; - } - - protected Object mapNullValue(final Object value) { - return value; - } - - @SuppressWarnings("unchecked") - protected V unmapNullValue(final Object value) { - return (V) value; - } - - /** - * Primitive specialised version of {@link #replace(Object, Object)} - * - * @param key key with which the specified value is associated - * @param value value to be associated with the specified key - * @return the previous value associated with the specified key, or {@code null} if there was no - * mapping for the key. 
- */ - public V replace(final int key, final V value) { - V curValue = get(key); - if (curValue != null) { - curValue = put(key, value); - } - - return curValue; - } - - /** - * Primitive specialised version of {@link #replace(Object, Object, Object)} - * - * @param key key with which the specified value is associated - * @param oldValue value expected to be associated with the specified key - * @param newValue value to be associated with the specified key - * @return {@code true} if the value was replaced - */ - public boolean replace(final int key, final V oldValue, final V newValue) { - final Object curValue = get(key); - if (curValue == null || !Objects.equals(unmapNullValue(curValue), oldValue)) { - return false; - } - - put(key, newValue); - - return true; - } - - private void increaseCapacity() { - final int newCapacity = values.length << 1; - if (newCapacity < 0) { - throw new IllegalStateException("max capacity reached at size=" + size); - } - - rehash(newCapacity); - } - - private void rehash(final int newCapacity) { - final int mask = newCapacity - 1; - /* */ - resizeThreshold = (int) (newCapacity * loadFactor); - - final int[] tempKeys = new int[newCapacity]; - final Object[] tempValues = new Object[newCapacity]; - - for (int i = 0, size = values.length; i < size; i++) { - final Object value = values[i]; - if (null != value) { - final int key = keys[i]; - int index = Hashing.hash(key, mask); - while (null != tempValues[index]) { - index = ++index & mask; - } - - tempKeys[index] = key; - tempValues[index] = value; - } - } - - keys = tempKeys; - values = tempValues; - } - - @SuppressWarnings("FinalParameters") - private void compactChain(int deleteIndex) { - final int mask = values.length - 1; - int index = deleteIndex; - while (true) { - index = ++index & mask; - if (null == values[index]) { - break; - } - - final int hash = Hashing.hash(keys[index], mask); - - if ((index < hash && (hash <= deleteIndex || deleteIndex <= index)) - || (hash <= deleteIndex && deleteIndex <= index)) { - keys[deleteIndex] = keys[index]; - values[deleteIndex] = values[index]; - - values[index] = null; - deleteIndex = index; - } - } - } - - /////////////////////////////////////////////////////////////////////////////////////////////// - // Sets and Collections - /////////////////////////////////////////////////////////////////////////////////////////////// - - /** Set of keys which supports optionally cached iterators to avoid allocation. */ - public final class KeySet extends AbstractSet implements Serializable { - private final KeyIterator keyIterator = shouldAvoidAllocation ? new KeyIterator() : null; - - /** {@inheritDoc} */ - public KeyIterator iterator() { - KeyIterator keyIterator = this.keyIterator; - if (null == keyIterator) { - keyIterator = new KeyIterator(); - } - - keyIterator.reset(); - return keyIterator; - } - - public int size() { - return Int2ObjectHashMap.this.size(); - } - - public boolean contains(final Object o) { - return Int2ObjectHashMap.this.containsKey(o); - } - - public boolean contains(final int key) { - return Int2ObjectHashMap.this.containsKey(key); - } - - public boolean remove(final Object o) { - return null != Int2ObjectHashMap.this.remove(o); - } - - public boolean remove(final int key) { - return null != Int2ObjectHashMap.this.remove(key); - } - - public void clear() { - Int2ObjectHashMap.this.clear(); - } - } - - /** Collection of values which supports optionally cached iterators to avoid allocation. 
*/ - public final class ValueCollection extends AbstractCollection implements Serializable { - private final ValueIterator valueIterator = shouldAvoidAllocation ? new ValueIterator() : null; - - /** {@inheritDoc} */ - public ValueIterator iterator() { - ValueIterator valueIterator = this.valueIterator; - if (null == valueIterator) { - valueIterator = new ValueIterator(); - } - - valueIterator.reset(); - return valueIterator; - } - - public int size() { - return Int2ObjectHashMap.this.size(); - } - - public boolean contains(final Object o) { - return Int2ObjectHashMap.this.containsValue(o); - } - - public void clear() { - Int2ObjectHashMap.this.clear(); - } - } - - /** Set of entries which supports access via an optionally cached iterator to avoid allocation. */ - public final class EntrySet extends AbstractSet> implements Serializable { - private final EntryIterator entryIterator = shouldAvoidAllocation ? new EntryIterator() : null; - - /** {@inheritDoc} */ - public EntryIterator iterator() { - EntryIterator entryIterator = this.entryIterator; - if (null == entryIterator) { - entryIterator = new EntryIterator(); - } - - entryIterator.reset(); - return entryIterator; - } - - public int size() { - return Int2ObjectHashMap.this.size(); - } - - public void clear() { - Int2ObjectHashMap.this.clear(); - } - - /** {@inheritDoc} */ - public boolean contains(final Object o) { - final Entry entry = (Entry) o; - final int key = (Integer) entry.getKey(); - final V value = getMapped(key); - return value != null && value.equals(mapNullValue(entry.getValue())); - } - } - - /////////////////////////////////////////////////////////////////////////////////////////////// - // Iterators - /////////////////////////////////////////////////////////////////////////////////////////////// - - abstract class AbstractIterator implements Iterator, Serializable { - private int posCounter; - private int stopCounter; - private int remaining; - boolean isPositionValid = false; - - protected final int position() { - return posCounter & (values.length - 1); - } - - public int remaining() { - return remaining; - } - - public boolean hasNext() { - return remaining > 0; - } - - protected final void findNext() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - - final Object[] values = Int2ObjectHashMap.this.values; - final int mask = values.length - 1; - - for (int i = posCounter - 1; i >= stopCounter; i--) { - final int index = i & mask; - if (null != values[index]) { - posCounter = i; - isPositionValid = true; - --remaining; - return; - } - } - - isPositionValid = false; - throw new IllegalStateException(); - } - - public abstract T next(); - - public void remove() { - if (isPositionValid) { - final int position = position(); - values[position] = null; - --size; - - compactChain(position); - - isPositionValid = false; - } else { - throw new IllegalStateException(); - } - } - - final void reset() { - remaining = Int2ObjectHashMap.this.size; - final Object[] values = Int2ObjectHashMap.this.values; - final int capacity = values.length; - - int i = capacity; - if (null != values[capacity - 1]) { - for (i = 0; i < capacity; i++) { - if (null == values[i]) { - break; - } - } - } - - stopCounter = i; - posCounter = i + capacity; - isPositionValid = false; - } - } - - /** Iterator over values. */ - public class ValueIterator extends AbstractIterator { - public V next() { - findNext(); - - return unmapNullValue(values[position()]); - } - } - - /** Iterator over keys which supports access to unboxed keys. 
*/ - public class KeyIterator extends AbstractIterator { - public Integer next() { - return nextInt(); - } - - public int nextInt() { - findNext(); - - return keys[position()]; - } - } - - /** Iterator over entries which supports access to unboxed keys and values. */ - public class EntryIterator extends AbstractIterator> - implements Entry { - public Entry next() { - findNext(); - if (shouldAvoidAllocation) { - return this; - } - - return allocateDuplicateEntry(); - } - - private Entry allocateDuplicateEntry() { - final int k = getIntKey(); - final V v = getValue(); - - return new Entry() { - public Integer getKey() { - return k; - } - - public V getValue() { - return v; - } - - public V setValue(final V value) { - return Int2ObjectHashMap.this.put(k, value); - } - - public int hashCode() { - return getIntKey() ^ (v != null ? v.hashCode() : 0); - } - - public boolean equals(final Object o) { - if (!(o instanceof Entry)) { - return false; - } - - final Entry e = (Entry) o; - - return (e.getKey() != null && e.getKey().equals(k)) - && ((e.getValue() == null && v == null) || e.getValue().equals(v)); - } - - public String toString() { - return k + "=" + v; - } - }; - } - - public Integer getKey() { - return getIntKey(); - } - - public int getIntKey() { - return keys[position()]; - } - - public V getValue() { - return unmapNullValue(values[position()]); - } - - @SuppressWarnings("unchecked") - public V setValue(final V value) { - final V val = (V) mapNullValue(value); - requireNonNull(val, "value cannot be null"); - - if (!this.isPositionValid) { - throw new IllegalStateException(); - } - - final int pos = position(); - final Object oldValue = values[pos]; - values[pos] = val; - - return (V) oldValue; - } - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/IntIntConsumer.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/IntIntConsumer.java deleted file mode 100644 index 3c0e6ec7..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/IntIntConsumer.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Copyright 2014-2020 Real Logic Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
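The object-valued variant removed above keeps primitive int keys and rejects null values, because an empty slot is what marks a free bucket in the open-addressed backing array. A usage sketch against the pre-removal API:

```java
import co.elastic.otel.profiler.collections.Int2ObjectHashMap;

// Sketch for the removed Int2ObjectHashMap: no key boxing on the hot path,
// and null values are rejected since null marks a free slot internally.
public class Int2ObjectHashMapSketch {
  public static void main(String[] args) {
    Int2ObjectHashMap<String> names = new Int2ObjectHashMap<>();

    names.put(1, "root span");
    names.put(2, "child span");

    System.out.println(names.get(1));         // root span
    System.out.println(names.get(99));        // null (not present)
    System.out.println(names.containsKey(2)); // true

    names.remove(1);
    System.out.println(names.size());         // 1

    // names.put(3, null) would throw NullPointerException ("value cannot be null")
  }
}
```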
- */ -package co.elastic.otel.profiler.collections; - -/** This is an (int, int) primitive specialisation of a BiConsumer */ -@FunctionalInterface -public interface IntIntConsumer { - /** - * Accept two values that comes as a tuple of ints. - * - * @param valueOne for the tuple. - * @param valueTwo for the tuple. - */ - void accept(int valueOne, int valueTwo); -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Long2LongHashMap.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Long2LongHashMap.java deleted file mode 100644 index adddb09f..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Long2LongHashMap.java +++ /dev/null @@ -1,879 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Copyright 2014-2020 Real Logic Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package co.elastic.otel.profiler.collections; - -import static co.elastic.otel.profiler.collections.CollectionUtil.findNextPositivePowerOfTwo; -import static co.elastic.otel.profiler.collections.CollectionUtil.validateLoadFactor; - -import java.io.Serializable; -import java.util.AbstractCollection; -import java.util.AbstractSet; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Objects; - -/** A open addressing with linear probing hash map specialised for primitive key and value pairs. 
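IntIntConsumer, removed just above, is the (int, int) specialisation that the removed Int2IntHashMap.intForEach accepts so that iteration stays allocation-free. A sketch using the pre-removal API:

```java
import co.elastic.otel.profiler.collections.Int2IntHashMap;
import co.elastic.otel.profiler.collections.IntIntConsumer;

// Sketch of the allocation-free iteration: intForEach takes the primitive
// (int, int) consumer instead of Map.forEach, so nothing is boxed while
// walking the interleaved entries array.
public class IntForEachSketch {
  public static void main(String[] args) {
    Int2IntHashMap histogram = new Int2IntHashMap(-1);
    histogram.put(200, 17);
    histogram.put(500, 2);

    IntIntConsumer printer =
        (statusCode, count) -> System.out.println(statusCode + " -> " + count);

    histogram.intForEach(printer);
  }
}
```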
*/ -public class Long2LongHashMap implements Map, Serializable { - static final int MIN_CAPACITY = 8; - - private final float loadFactor; - private final long missingValue; - private int resizeThreshold; - private int size = 0; - private final boolean shouldAvoidAllocation; - - private long[] entries; - private KeySet keySet; - private ValueCollection values; - private EntrySet entrySet; - - public Long2LongHashMap(final long missingValue) { - this(MIN_CAPACITY, Hashing.DEFAULT_LOAD_FACTOR, missingValue); - } - - public Long2LongHashMap( - final int initialCapacity, final float loadFactor, final long missingValue) { - this(initialCapacity, loadFactor, missingValue, true); - } - - /** - * @param initialCapacity for the map to override {@link #MIN_CAPACITY} - * @param loadFactor for the map to override {@link Hashing#DEFAULT_LOAD_FACTOR}. - * @param missingValue for the map that represents null. - * @param shouldAvoidAllocation should allocation be avoided by caching iterators and map entries. - */ - public Long2LongHashMap( - final int initialCapacity, - final float loadFactor, - final long missingValue, - final boolean shouldAvoidAllocation) { - validateLoadFactor(loadFactor); - - this.loadFactor = loadFactor; - this.missingValue = missingValue; - this.shouldAvoidAllocation = shouldAvoidAllocation; - - capacity(findNextPositivePowerOfTwo(Math.max(MIN_CAPACITY, initialCapacity))); - } - - /** - * The value to be used as a null marker in the map. - * - * @return value to be used as a null marker in the map. - */ - public long missingValue() { - return missingValue; - } - - /** - * Get the load factor applied for resize operations. - * - * @return the load factor applied for resize operations. - */ - public float loadFactor() { - return loadFactor; - } - - /** - * Get the total capacity for the map to which the load factor will be a fraction of. - * - * @return the total capacity for the map. - */ - public int capacity() { - return entries.length >> 2; - } - - /** - * Get the actual threshold which when reached the map will resize. This is a function of the - * current capacity and load factor. - * - * @return the threshold when the map will resize. - */ - public int resizeThreshold() { - return resizeThreshold; - } - - /** {@inheritDoc} */ - public int size() { - return size; - } - - /** {@inheritDoc} */ - public boolean isEmpty() { - return size == 0; - } - - public long get(final long key) { - final long[] entries = this.entries; - final long missingValue = this.missingValue; - final int mask = entries.length - 1; - int index = Hashing.evenHash(key, mask); - - long value = missingValue; - while (entries[index + 1] != missingValue) { - if (entries[index] == key) { - value = entries[index + 1]; - break; - } - - index = next(index, mask); - } - - return value; - } - - /** - * Put a key value pair in the map. 
- * - * @param key lookup key - * @param value new value, must not be initialValue - * @return current counter value associated with key, or initialValue if none found - * @throws IllegalArgumentException if value is missingValue - */ - public long put(final long key, final long value) { - if (value == missingValue) { - throw new IllegalArgumentException("cannot accept missingValue"); - } - - final long[] entries = this.entries; - final long missingValue = this.missingValue; - final int mask = entries.length - 1; - int index = Hashing.evenHash(key, mask); - long oldValue = missingValue; - - while (entries[index + 1] != missingValue) { - if (entries[index] == key) { - oldValue = entries[index + 1]; - break; - } - - index = next(index, mask); - } - - if (oldValue == missingValue) { - ++size; - entries[index] = key; - } - - entries[index + 1] = value; - - increaseCapacity(); - - return oldValue; - } - - private void increaseCapacity() { - if (size > resizeThreshold) { - // entries.length = 2 * capacity - final int newCapacity = entries.length; - rehash(newCapacity); - } - } - - private void rehash(final int newCapacity) { - final long[] oldEntries = entries; - final long missingValue = this.missingValue; - final int length = entries.length; - - capacity(newCapacity); - - final long[] newEntries = entries; - final int mask = entries.length - 1; - - for (int keyIndex = 0; keyIndex < length; keyIndex += 2) { - final long value = oldEntries[keyIndex + 1]; - if (value != missingValue) { - final long key = oldEntries[keyIndex]; - int index = Hashing.evenHash(key, mask); - - while (newEntries[index + 1] != missingValue) { - index = next(index, mask); - } - - newEntries[index] = key; - newEntries[index + 1] = value; - } - } - } - - /** - * Primitive specialised forEach implementation. - * - *
NB: Renamed from forEach to avoid overloading on parameter types of lambda expression, which - * doesn't play well with type inference in lambda expressions. - * - * @param consumer a callback called for each key/value pair in the map. - */ - public void longForEach(final LongLongConsumer consumer) { - final long[] entries = this.entries; - final long missingValue = this.missingValue; - final int length = entries.length; - - for (int keyIndex = 0; keyIndex < length; keyIndex += 2) { - if (entries[keyIndex + 1] != missingValue) // lgtm [java/index-out-of-bounds] - { - consumer.accept( - entries[keyIndex], entries[keyIndex + 1]); // lgtm [java/index-out-of-bounds] - } - } - } - - /** - * Long primitive specialised containsKey. - * - * @param key the key to check. - * @return true if the map contains key as a key, false otherwise. - */ - public boolean containsKey(final long key) { - return get(key) != missingValue; - } - - /** - * Does the map contain the value. - * - * @param value to be tested against contained values. - * @return true if contained otherwise value. - */ - public boolean containsValue(final long value) { - boolean found = false; - if (value != missingValue) { - final long[] entries = this.entries; - final int length = entries.length; - - for (int valueIndex = 1; valueIndex < length; valueIndex += 2) { - if (value == entries[valueIndex]) { - found = true; - break; - } - } - } - - return found; - } - - /** {@inheritDoc} */ - public void clear() { - if (size > 0) { - Arrays.fill(entries, missingValue); - size = 0; - } - } - - /** - * Compact the backing arrays by rehashing with a capacity just larger than current size and - * giving consideration to the load factor. - */ - public void compact() { - final int idealCapacity = (int) Math.round(size() * (1.0d / loadFactor)); - rehash(findNextPositivePowerOfTwo(Math.max(MIN_CAPACITY, idealCapacity))); - } - - // ---------------- Boxed Versions Below ---------------- - - /** {@inheritDoc} */ - public Long get(final Object key) { - return valOrNull(get((long) key)); - } - - /** {@inheritDoc} */ - public Long put(final Long key, final Long value) { - return valOrNull(put((long) key, (long) value)); - } - - /** {@inheritDoc} */ - public boolean containsKey(final Object key) { - return containsKey((long) key); - } - - /** {@inheritDoc} */ - public boolean containsValue(final Object value) { - return containsValue((long) value); - } - - /** {@inheritDoc} */ - public void putAll(final Map map) { - for (final Map.Entry entry : map.entrySet()) { - put(entry.getKey(), entry.getValue()); - } - } - - /** {@inheritDoc} */ - public KeySet keySet() { - if (null == keySet) { - keySet = new KeySet(); - } - - return keySet; - } - - /** {@inheritDoc} */ - public ValueCollection values() { - if (null == values) { - values = new ValueCollection(); - } - - return values; - } - - /** {@inheritDoc} */ - public EntrySet entrySet() { - if (null == entrySet) { - entrySet = new EntrySet(); - } - - return entrySet; - } - - /** {@inheritDoc} */ - public Long remove(final Object key) { - return valOrNull(remove((long) key)); - } - - public long remove(final long key) { - final long[] entries = this.entries; - final long missingValue = this.missingValue; - final int mask = entries.length - 1; - int keyIndex = Hashing.evenHash(key, mask); - - long oldValue = missingValue; - while (entries[keyIndex + 1] != missingValue) { - if (entries[keyIndex] == key) { - oldValue = entries[keyIndex + 1]; - entries[keyIndex + 1] = missingValue; - size--; - - 
compactChain(keyIndex); - - break; - } - - keyIndex = next(keyIndex, mask); - } - - return oldValue; - } - - @SuppressWarnings("FinalParameters") - private void compactChain(int deleteKeyIndex) { - final long[] entries = this.entries; - final long missingValue = this.missingValue; - final int mask = entries.length - 1; - int keyIndex = deleteKeyIndex; - - while (true) { - keyIndex = next(keyIndex, mask); - if (entries[keyIndex + 1] == missingValue) { - break; - } - - final int hash = Hashing.evenHash(entries[keyIndex], mask); - - if ((keyIndex < hash && (hash <= deleteKeyIndex || deleteKeyIndex <= keyIndex)) - || (hash <= deleteKeyIndex && deleteKeyIndex <= keyIndex)) { - entries[deleteKeyIndex] = entries[keyIndex]; - entries[deleteKeyIndex + 1] = entries[keyIndex + 1]; - - entries[keyIndex + 1] = missingValue; - deleteKeyIndex = keyIndex; - } - } - } - - /** - * Get the minimum value stored in the map. If the map is empty then it will return {@link - * #missingValue()} - * - * @return the minimum value stored in the map. - */ - public long minValue() { - final long missingValue = this.missingValue; - long min = size == 0 ? missingValue : Long.MAX_VALUE; - - final long[] entries = this.entries; - final int length = entries.length; - - for (int valueIndex = 1; valueIndex < length; valueIndex += 2) { - final long value = entries[valueIndex]; - if (value != missingValue) { - min = Math.min(min, value); - } - } - - return min; - } - - /** - * Get the maximum value stored in the map. If the map is empty then it will return {@link - * #missingValue()} - * - * @return the maximum value stored in the map. - */ - public long maxValue() { - final long missingValue = this.missingValue; - long max = size == 0 ? missingValue : Long.MIN_VALUE; - - final long[] entries = this.entries; - final int length = entries.length; - - for (int valueIndex = 1; valueIndex < length; valueIndex += 2) { - final long value = entries[valueIndex]; - if (value != missingValue) { - max = Math.max(max, value); - } - } - - return max; - } - - /** {@inheritDoc} */ - public String toString() { - if (isEmpty()) { - return "{}"; - } - - final EntryIterator entryIterator = new EntryIterator(); - entryIterator.reset(); - - final StringBuilder sb = new StringBuilder().append('{'); - while (true) { - entryIterator.next(); - sb.append(entryIterator.getLongKey()).append('=').append(entryIterator.getLongValue()); - if (!entryIterator.hasNext()) { - return sb.append('}').toString(); - } - sb.append(',').append(' '); - } - } - - /** - * Primitive specialised version of {@link #replace(Object, Object)} - * - * @param key key with which the specified value is associated - * @param value value to be associated with the specified key - * @return the previous value associated with the specified key, or {@link #missingValue()} if - * there was no mapping for the key. 
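A further small sketch of the primitive-specialised replace(), minValue() and maxValue() described around here; the keys and values are invented for illustration.

    import co.elastic.otel.profiler.collections.Long2LongHashMap;

    public class Long2LongHashMapReplaceSketch {
      public static void main(String[] args) {
        Long2LongHashMap latenciesMicros = new Long2LongHashMap(-1L);
        latenciesMicros.put(1L, 250L);
        latenciesMicros.put(2L, 900L);

        // replace() only overwrites existing mappings; absent keys are left untouched.
        long old = latenciesMicros.replace(1L, 300L);    // 250
        long none = latenciesMicros.replace(5L, 100L);   // -1, key 5 was never added

        System.out.println(old + " " + none
            + " min=" + latenciesMicros.minValue()       // 300
            + " max=" + latenciesMicros.maxValue());     // 900
      }
    }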
- */ - public long replace(final long key, final long value) { - long currentValue = get(key); - if (currentValue != missingValue) { - currentValue = put(key, value); - } - - return currentValue; - } - - /** - * Primitive specialised version of {@link #replace(Object, Object, Object)} - * - * @param key key with which the specified value is associated - * @param oldValue value expected to be associated with the specified key - * @param newValue value to be associated with the specified key - * @return {@code true} if the value was replaced - */ - public boolean replace(final long key, final long oldValue, final long newValue) { - final long curValue = get(key); - if (curValue != oldValue || curValue == missingValue) { - return false; - } - - put(key, newValue); - - return true; - } - - /** {@inheritDoc} */ - public boolean equals(final Object o) { - if (this == o) { - return true; - } - if (!(o instanceof Map)) { - return false; - } - - final Map that = (Map) o; - - return size == that.size() && entrySet().equals(that.entrySet()); - } - - public int hashCode() { - return entrySet().hashCode(); - } - - private static int next(final int index, final int mask) { - return (index + 2) & mask; - } - - private void capacity(final int newCapacity) { - final int entriesLength = newCapacity * 2; - if (entriesLength < 0) { - throw new IllegalStateException("max capacity reached at size=" + size); - } - - resizeThreshold = (int) (newCapacity * loadFactor); - entries = new long[entriesLength]; - Arrays.fill(entries, missingValue); - } - - private Long valOrNull(final long value) { - return value == missingValue ? null : value; - } - - // ---------------- Utility Classes ---------------- - - abstract class AbstractIterator implements Serializable { - protected boolean isPositionValid = false; - private int remaining; - private int positionCounter; - private int stopCounter; - - final void reset() { - isPositionValid = false; - remaining = Long2LongHashMap.this.size; - final long missingValue = Long2LongHashMap.this.missingValue; - final long[] entries = Long2LongHashMap.this.entries; - final int capacity = entries.length; - - int keyIndex = capacity; - if (entries[capacity - 1] != missingValue) { - keyIndex = 0; - for (; keyIndex < capacity; keyIndex += 2) { - if (entries[keyIndex + 1] == missingValue) // lgtm [java/index-out-of-bounds] - { - break; - } - } - } - - stopCounter = keyIndex; - positionCounter = keyIndex + capacity; - } - - protected final int keyPosition() { - return positionCounter & entries.length - 1; - } - - public int remaining() { - return remaining; - } - - public boolean hasNext() { - return remaining > 0; - } - - protected final void findNext() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - - final long[] entries = Long2LongHashMap.this.entries; - final long missingValue = Long2LongHashMap.this.missingValue; - final int mask = entries.length - 1; - - for (int keyIndex = positionCounter - 2; keyIndex >= stopCounter; keyIndex -= 2) { - final int index = keyIndex & mask; - if (entries[index + 1] != missingValue) { - isPositionValid = true; - positionCounter = keyIndex; - --remaining; - return; - } - } - - isPositionValid = false; - throw new IllegalStateException(); - } - - public void remove() { - if (isPositionValid) { - final int position = keyPosition(); - entries[position + 1] = missingValue; - --size; - - compactChain(position); - - isPositionValid = false; - } else { - throw new IllegalStateException(); - } - } - } - - /** Iterator over keys which supports 
access to unboxed keys. */ - public final class KeyIterator extends AbstractIterator implements Iterator { - public Long next() { - return nextValue(); - } - - public long nextValue() { - findNext(); - return entries[keyPosition()]; - } - } - - /** Iterator over values which supports access to unboxed values. */ - public final class ValueIterator extends AbstractIterator implements Iterator { - public Long next() { - return nextValue(); - } - - public long nextValue() { - findNext(); - return entries[keyPosition() + 1]; - } - } - - /** Iterator over entries which supports access to unboxed keys and values. */ - public final class EntryIterator extends AbstractIterator - implements Iterator>, Entry { - public Long getKey() { - return getLongKey(); - } - - public long getLongKey() { - return entries[keyPosition()]; - } - - public Long getValue() { - return getLongValue(); - } - - public long getLongValue() { - return entries[keyPosition() + 1]; - } - - public Long setValue(final Long value) { - return setValue(value.longValue()); - } - - public long setValue(final long value) { - if (!isPositionValid) { - throw new IllegalStateException(); - } - - if (missingValue == value) { - throw new IllegalArgumentException(); - } - - final int keyPosition = keyPosition(); - final long prevValue = entries[keyPosition + 1]; - entries[keyPosition + 1] = value; - return prevValue; - } - - public Entry next() { - findNext(); - - if (shouldAvoidAllocation) { - return this; - } - - return allocateDuplicateEntry(); - } - - private Entry allocateDuplicateEntry() { - final long k = getLongKey(); - final long v = getLongValue(); - - return new Entry() { - public Long getKey() { - return k; - } - - public Long getValue() { - return v; - } - - public Long setValue(final Long value) { - return Long2LongHashMap.this.put(k, value.longValue()); - } - - public int hashCode() { - return Hashing.hashCode(getLongKey()) ^ Hashing.hashCode(getLongValue()); - } - - public boolean equals(final Object o) { - if (!(o instanceof Entry)) { - return false; - } - - final Map.Entry e = (Entry) o; - - return (e.getKey() != null && e.getValue() != null) - && (e.getKey().equals(k) && e.getValue().equals(v)); - } - - public String toString() { - return k + "=" + v; - } - }; - } - - /** {@inheritDoc} */ - public int hashCode() { - return Hashing.hashCode(getLongKey()) ^ Hashing.hashCode(getLongValue()); - } - - /** {@inheritDoc} */ - public boolean equals(final Object o) { - if (this == o) { - return true; - } - - if (!(o instanceof Entry)) { - return false; - } - - final Entry that = (Entry) o; - - return Objects.equals(getKey(), that.getKey()) && Objects.equals(getValue(), that.getValue()); - } - } - - /** Set of keys which supports optional cached iterators to avoid allocation. */ - public final class KeySet extends AbstractSet implements Serializable { - private final KeyIterator keyIterator = shouldAvoidAllocation ? 
new KeyIterator() : null; - - /** {@inheritDoc} */ - public KeyIterator iterator() { - KeyIterator keyIterator = this.keyIterator; - if (null == keyIterator) { - keyIterator = new KeyIterator(); - } - - keyIterator.reset(); - - return keyIterator; - } - - /** {@inheritDoc} */ - public int size() { - return Long2LongHashMap.this.size(); - } - - /** {@inheritDoc} */ - public boolean isEmpty() { - return Long2LongHashMap.this.isEmpty(); - } - - /** {@inheritDoc} */ - public void clear() { - Long2LongHashMap.this.clear(); - } - - /** {@inheritDoc} */ - public boolean contains(final Object o) { - return contains((long) o); - } - - public boolean contains(final long key) { - return containsKey(key); - } - } - - /** Collection of values which supports optionally cached iterators to avoid allocation. */ - public final class ValueCollection extends AbstractCollection { - private final ValueIterator valueIterator = shouldAvoidAllocation ? new ValueIterator() : null; - - /** {@inheritDoc} */ - public ValueIterator iterator() { - ValueIterator valueIterator = this.valueIterator; - if (null == valueIterator) { - valueIterator = new ValueIterator(); - } - - valueIterator.reset(); - - return valueIterator; - } - - /** {@inheritDoc} */ - public int size() { - return Long2LongHashMap.this.size(); - } - - /** {@inheritDoc} */ - public boolean contains(final Object o) { - return contains((long) o); - } - - public boolean contains(final long key) { - return containsValue(key); - } - } - - /** Set of entries which supports optionally cached iterators to avoid allocation. */ - public final class EntrySet extends AbstractSet> implements Serializable { - private final EntryIterator entryIterator = shouldAvoidAllocation ? new EntryIterator() : null; - - /** {@inheritDoc} */ - public EntryIterator iterator() { - EntryIterator entryIterator = this.entryIterator; - if (null == entryIterator) { - entryIterator = new EntryIterator(); - } - - entryIterator.reset(); - - return entryIterator; - } - - /** {@inheritDoc} */ - public int size() { - return Long2LongHashMap.this.size(); - } - - /** {@inheritDoc} */ - public boolean isEmpty() { - return Long2LongHashMap.this.isEmpty(); - } - - /** {@inheritDoc} */ - public void clear() { - Long2LongHashMap.this.clear(); - } - - /** {@inheritDoc} */ - public boolean contains(final Object o) { - if (!(o instanceof Entry)) { - return false; - } - final Entry entry = (Entry) o; - final Long value = get(entry.getKey()); - - return value != null && value.equals(entry.getValue()); - } - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Long2ObjectHashMap.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Long2ObjectHashMap.java deleted file mode 100644 index a5b84eb4..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/Long2ObjectHashMap.java +++ /dev/null @@ -1,807 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Copyright 2014-2020 Real Logic Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package co.elastic.otel.profiler.collections; - -import static co.elastic.otel.profiler.collections.CollectionUtil.findNextPositivePowerOfTwo; -import static co.elastic.otel.profiler.collections.CollectionUtil.validateLoadFactor; -import static java.util.Objects.requireNonNull; - -import java.io.Serializable; -import java.util.AbstractCollection; -import java.util.AbstractSet; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Objects; - -/** - * {@link Map} implementation specialised for long keys using open addressing and linear probing for - * cache efficient access. - * - * @param type of values stored in the {@link Map} - */ -public class Long2ObjectHashMap implements Map, Serializable { - static final int MIN_CAPACITY = 8; - - private final float loadFactor; - private int resizeThreshold; - private int size; - private final boolean shouldAvoidAllocation; - - private long[] keys; - private Object[] values; - - private ValueCollection valueCollection; - private KeySet keySet; - private EntrySet entrySet; - - public Long2ObjectHashMap() { - this(MIN_CAPACITY, Hashing.DEFAULT_LOAD_FACTOR, true); - } - - public Long2ObjectHashMap(final int initialCapacity, final float loadFactor) { - this(initialCapacity, loadFactor, true); - } - - /** - * Construct a new map allowing a configuration for initial capacity and load factor. - * - * @param initialCapacity for the backing array - * @param loadFactor limit for resizing on puts - * @param shouldAvoidAllocation should allocation be avoided by caching iterators and map entries. - */ - public Long2ObjectHashMap( - final int initialCapacity, final float loadFactor, final boolean shouldAvoidAllocation) { - validateLoadFactor(loadFactor); - - this.loadFactor = loadFactor; - this.shouldAvoidAllocation = shouldAvoidAllocation; - - /* */ - final int capacity = findNextPositivePowerOfTwo(Math.max(MIN_CAPACITY, initialCapacity)); - /* */ - resizeThreshold = (int) (capacity * loadFactor); - - keys = new long[capacity]; - values = new Object[capacity]; - } - - /** - * Copy construct a new map from an existing one. - * - * @param mapToCopy for construction. 
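For reference, a minimal usage sketch of the Long2ObjectHashMap deleted here. The constructor, copy constructor and the primitive-keyed accessors come from the deleted source; the value-type parameter is taken from the class javadoc, and the names and values are illustrative.

    import co.elastic.otel.profiler.collections.Long2ObjectHashMap;

    public class Long2ObjectHashMapSketch {
      public static void main(String[] args) {
        // Keys stay unboxed longs; values are ordinary objects and must not be null.
        // <String> stands in for the value-type parameter documented in the class javadoc.
        Long2ObjectHashMap<String> spanNamesById = new Long2ObjectHashMap<>();

        spanNamesById.put(1L, "GET /users");
        spanNamesById.put(2L, "SELECT * FROM users");

        String name = spanNamesById.get(2L);               // "SELECT * FROM users"
        boolean known = spanNamesById.containsKey(3L);      // false, no boxing of the key

        // The copy constructor clones the backing arrays, so later mutation
        // of the original does not affect the copy.
        Long2ObjectHashMap<String> snapshot = new Long2ObjectHashMap<>(spanNamesById);
        spanNamesById.remove(1L);

        System.out.println(name + " " + known
            + " snapshot=" + snapshot.size() + " live=" + spanNamesById.size());
      }
    }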
- */ - public Long2ObjectHashMap(final Long2ObjectHashMap mapToCopy) { - this.loadFactor = mapToCopy.loadFactor; - this.resizeThreshold = mapToCopy.resizeThreshold; - this.size = mapToCopy.size; - this.shouldAvoidAllocation = mapToCopy.shouldAvoidAllocation; - - keys = mapToCopy.keys.clone(); - values = mapToCopy.values.clone(); - } - - /** - * Get the load factor beyond which the map will increase size. - * - * @return load factor for when the map should increase size. - */ - public float loadFactor() { - return loadFactor; - } - - /** - * Get the total capacity for the map to which the load factor will be a fraction of. - * - * @return the total capacity for the map. - */ - public int capacity() { - return values.length; - } - - /** - * Get the actual threshold which when reached the map will resize. This is a function of the - * current capacity and load factor. - * - * @return the threshold when the map will resize. - */ - public int resizeThreshold() { - return resizeThreshold; - } - - /** {@inheritDoc} */ - public int size() { - return size; - } - - /** {@inheritDoc} */ - public boolean isEmpty() { - return 0 == size; - } - - /** {@inheritDoc} */ - public boolean containsKey(final Object key) { - return containsKey(((Long) key).longValue()); - } - - /** - * Overloaded version of {@link Map#containsKey(Object)} that takes a primitive long key. - * - * @param key for indexing the {@link Map} - * @return true if the key is found otherwise false. - */ - public boolean containsKey(final long key) { - final int mask = values.length - 1; - int index = Hashing.hash(key, mask); - - boolean found = false; - while (null != values[index]) { - if (key == keys[index]) { - found = true; - break; - } - - index = ++index & mask; - } - - return found; - } - - /** {@inheritDoc} */ - public boolean containsValue(final Object value) { - boolean found = false; - final Object val = mapNullValue(value); - if (null != val) { - for (final Object v : values) { - if (val.equals(v)) { - found = true; - break; - } - } - } - - return found; - } - - /** {@inheritDoc} */ - public V get(final Object key) { - return get(((Long) key).longValue()); - } - - /** - * Overloaded version of {@link Map#get(Object)} that takes a primitive long key. - * - * @param key for indexing the {@link Map} - * @return the value if found otherwise null - */ - public V get(final long key) { - return unmapNullValue(getMapped(key)); - } - - @SuppressWarnings("unchecked") - protected V getMapped(final long key) { - final int mask = values.length - 1; - int index = Hashing.hash(key, mask); - - Object value; - while (null != (value = values[index])) { - if (key == keys[index]) { - break; - } - - index = ++index & mask; - } - - return (V) value; - } - - /** {@inheritDoc} */ - public V put(final Long key, final V value) { - return put(key.longValue(), value); - } - - /** - * Overloaded version of {@link Map#put(Object, Object)} that takes a primitive long key. 
- * - * @param key for indexing the {@link Map} - * @param value to be inserted in the {@link Map} - * @return the previous value if found otherwise null - */ - @SuppressWarnings("unchecked") - public V put(final long key, final V value) { - final V val = (V) mapNullValue(value); - requireNonNull(val, "value cannot be null"); - - V oldValue = null; - final int mask = values.length - 1; - int index = Hashing.hash(key, mask); - - while (null != values[index]) { - if (key == keys[index]) { - oldValue = (V) values[index]; - break; - } - - index = ++index & mask; - } - - if (null == oldValue) { - ++size; - keys[index] = key; - } - - values[index] = val; - - if (size > resizeThreshold) { - increaseCapacity(); - } - - return unmapNullValue(oldValue); - } - - /** {@inheritDoc} */ - public V remove(final Object key) { - return remove(((Long) key).longValue()); - } - - /** - * Overloaded version of {@link Map#remove(Object)} that takes a primitive long key. - * - * @param key for indexing the {@link Map} - * @return the value if found otherwise null - */ - public V remove(final long key) { - final int mask = values.length - 1; - int index = Hashing.hash(key, mask); - - Object value; - while (null != (value = values[index])) { - if (key == keys[index]) { - values[index] = null; - --size; - - compactChain(index); - break; - } - - index = ++index & mask; - } - - return unmapNullValue(value); - } - - /** {@inheritDoc} */ - public void clear() { - if (size > 0) { - Arrays.fill(values, null); - size = 0; - } - } - - /** - * Compact the {@link Map} backing arrays by rehashing with a capacity just larger than current - * size and giving consideration to the load factor. - */ - public void compact() { - final int idealCapacity = (int) Math.round(size() * (1.0d / loadFactor)); - rehash(findNextPositivePowerOfTwo(Math.max(MIN_CAPACITY, idealCapacity))); - } - - /** {@inheritDoc} */ - public void putAll(final Map map) { - for (final Entry entry : map.entrySet()) { - put(entry.getKey(), entry.getValue()); - } - } - - /** {@inheritDoc} */ - public KeySet keySet() { - if (null == keySet) { - keySet = new KeySet(); - } - - return keySet; - } - - /** {@inheritDoc} */ - public ValueCollection values() { - if (null == valueCollection) { - valueCollection = new ValueCollection(); - } - - return valueCollection; - } - - /** {@inheritDoc} */ - public EntrySet entrySet() { - if (null == entrySet) { - entrySet = new EntrySet(); - } - - return entrySet; - } - - /** {@inheritDoc} */ - public String toString() { - if (isEmpty()) { - return "{}"; - } - - final EntryIterator entryIterator = new EntryIterator(); - entryIterator.reset(); - - final StringBuilder sb = new StringBuilder().append('{'); - while (true) { - entryIterator.next(); - sb.append(entryIterator.getLongKey()) - .append('=') - .append(unmapNullValue(entryIterator.getValue())); - if (!entryIterator.hasNext()) { - return sb.append('}').toString(); - } - sb.append(',').append(' '); - } - } - - /** {@inheritDoc} */ - public boolean equals(final Object o) { - if (this == o) { - return true; - } - - if (!(o instanceof Map)) { - return false; - } - - final Map that = (Map) o; - - if (size != that.size()) { - return false; - } - - for (int i = 0, length = values.length; i < length; i++) { - final Object thisValue = values[i]; - if (null != thisValue) { - final Object thatValue = that.get(keys[i]); - if (!thisValue.equals(mapNullValue(thatValue))) { - return false; - } - } - } - - return true; - } - - /** {@inheritDoc} */ - public int hashCode() { - int result = 0; - - 
for (int i = 0, length = values.length; i < length; i++) { - final Object value = values[i]; - if (null != value) { - result += (Hashing.hashCode(keys[i]) ^ value.hashCode()); - } - } - - return result; - } - - protected Object mapNullValue(final Object value) { - return value; - } - - @SuppressWarnings("unchecked") - protected V unmapNullValue(final Object value) { - return (V) value; - } - - /** - * Primitive specialised version of {@link #replace(Object, Object)} - * - * @param key key with which the specified value is associated - * @param value value to be associated with the specified key - * @return the previous value associated with the specified key, or {@code null} if there was no - * mapping for the key. - */ - public V replace(final long key, final V value) { - V curValue = get(key); - if (curValue != null) { - curValue = put(key, value); - } - - return curValue; - } - - /** - * Primitive specialised version of {@link #replace(Object, Object, Object)} - * - * @param key key with which the specified value is associated - * @param oldValue value expected to be associated with the specified key - * @param newValue value to be associated with the specified key - * @return {@code true} if the value was replaced - */ - public boolean replace(final long key, final V oldValue, final V newValue) { - final Object curValue = get(key); - if (curValue == null || !Objects.equals(unmapNullValue(curValue), oldValue)) { - return false; - } - - put(key, newValue); - - return true; - } - - private void increaseCapacity() { - final int newCapacity = values.length << 1; - if (newCapacity < 0) { - throw new IllegalStateException("max capacity reached at size=" + size); - } - - rehash(newCapacity); - } - - private void rehash(final int newCapacity) { - final int mask = newCapacity - 1; - /* */ - resizeThreshold = (int) (newCapacity * loadFactor); - - final long[] tempKeys = new long[newCapacity]; - final Object[] tempValues = new Object[newCapacity]; - - for (int i = 0, size = values.length; i < size; i++) { - final Object value = values[i]; - if (null != value) { - final long key = keys[i]; - int index = Hashing.hash(key, mask); - while (null != tempValues[index]) { - index = ++index & mask; - } - - tempKeys[index] = key; - tempValues[index] = value; - } - } - - keys = tempKeys; - values = tempValues; - } - - @SuppressWarnings("FinalParameters") - private void compactChain(int deleteIndex) { - final int mask = values.length - 1; - int index = deleteIndex; - while (true) { - index = ++index & mask; - if (null == values[index]) { - break; - } - - final int hash = Hashing.hash(keys[index], mask); - - if ((index < hash && (hash <= deleteIndex || deleteIndex <= index)) - || (hash <= deleteIndex && deleteIndex <= index)) { - keys[deleteIndex] = keys[index]; - values[deleteIndex] = values[index]; - - values[index] = null; - deleteIndex = index; - } - } - } - - /////////////////////////////////////////////////////////////////////////////////////////////// - // Sets and Collections - /////////////////////////////////////////////////////////////////////////////////////////////// - - /** Set of keys which supports optionally cached iterators to avoid allocation. */ - public final class KeySet extends AbstractSet implements Serializable { - private final KeyIterator keyIterator = shouldAvoidAllocation ? 
new KeyIterator() : null; - - /** {@inheritDoc} */ - public KeyIterator iterator() { - KeyIterator keyIterator = this.keyIterator; - if (null == keyIterator) { - keyIterator = new KeyIterator(); - } - - keyIterator.reset(); - return keyIterator; - } - - public int size() { - return Long2ObjectHashMap.this.size(); - } - - public boolean contains(final Object o) { - return Long2ObjectHashMap.this.containsKey(o); - } - - public boolean contains(final long key) { - return Long2ObjectHashMap.this.containsKey(key); - } - - public boolean remove(final Object o) { - return null != Long2ObjectHashMap.this.remove(o); - } - - public boolean remove(final long key) { - return null != Long2ObjectHashMap.this.remove(key); - } - - public void clear() { - Long2ObjectHashMap.this.clear(); - } - } - - /** Collection of values which supports optionally cached iterators to avoid allocation. */ - public final class ValueCollection extends AbstractCollection implements Serializable { - private final ValueIterator valueIterator = shouldAvoidAllocation ? new ValueIterator() : null; - - /** {@inheritDoc} */ - public ValueIterator iterator() { - ValueIterator valueIterator = this.valueIterator; - if (null == valueIterator) { - valueIterator = new ValueIterator(); - } - - valueIterator.reset(); - return valueIterator; - } - - public int size() { - return Long2ObjectHashMap.this.size(); - } - - public boolean contains(final Object o) { - return Long2ObjectHashMap.this.containsValue(o); - } - - public void clear() { - Long2ObjectHashMap.this.clear(); - } - } - - /** Set of entries which supports access via an optionally cached iterator to avoid allocation. */ - public final class EntrySet extends AbstractSet> implements Serializable { - private final EntryIterator entryIterator = shouldAvoidAllocation ? 
new EntryIterator() : null; - - /** {@inheritDoc} */ - public EntryIterator iterator() { - EntryIterator entryIterator = this.entryIterator; - if (null == entryIterator) { - entryIterator = new EntryIterator(); - } - - entryIterator.reset(); - return entryIterator; - } - - public int size() { - return Long2ObjectHashMap.this.size(); - } - - public void clear() { - Long2ObjectHashMap.this.clear(); - } - - /** {@inheritDoc} */ - public boolean contains(final Object o) { - final Entry entry = (Entry) o; - final long key = (Long) entry.getKey(); - final V value = getMapped(key); - return value != null && value.equals(mapNullValue(entry.getValue())); - } - } - - /////////////////////////////////////////////////////////////////////////////////////////////// - // Iterators - /////////////////////////////////////////////////////////////////////////////////////////////// - - abstract class AbstractIterator implements Iterator, Serializable { - private int posCounter; - private int stopCounter; - private int remaining; - boolean isPositionValid = false; - - protected final int position() { - return posCounter & (values.length - 1); - } - - public int remaining() { - return remaining; - } - - public boolean hasNext() { - return remaining > 0; - } - - protected final void findNext() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - - final Object[] values = Long2ObjectHashMap.this.values; - final int mask = values.length - 1; - - for (int i = posCounter - 1; i >= stopCounter; i--) { - final int index = i & mask; - if (null != values[index]) { - posCounter = i; - isPositionValid = true; - --remaining; - return; - } - } - - isPositionValid = false; - throw new IllegalStateException(); - } - - public abstract T next(); - - public void remove() { - if (isPositionValid) { - final int position = position(); - values[position] = null; - --size; - - compactChain(position); - - isPositionValid = false; - } else { - throw new IllegalStateException(); - } - } - - final void reset() { - remaining = Long2ObjectHashMap.this.size; - final Object[] values = Long2ObjectHashMap.this.values; - final int capacity = values.length; - - int i = capacity; - if (null != values[capacity - 1]) { - for (i = 0; i < capacity; i++) { - if (null == values[i]) { - break; - } - } - } - - stopCounter = i; - posCounter = i + capacity; - isPositionValid = false; - } - } - - /** Iterator over values. */ - public class ValueIterator extends AbstractIterator { - public V next() { - findNext(); - - return unmapNullValue(values[position()]); - } - } - - /** Iterator over keys which supports access to unboxed keys. */ - public class KeyIterator extends AbstractIterator { - public Long next() { - return nextLong(); - } - - public long nextLong() { - findNext(); - - return keys[position()]; - } - } - - /** Iterator over entries which supports access to unboxed keys and values. */ - public class EntryIterator extends AbstractIterator> implements Entry { - public Entry next() { - findNext(); - if (shouldAvoidAllocation) { - return this; - } - - return allocateDuplicateEntry(); - } - - private Entry allocateDuplicateEntry() { - final long k = getLongKey(); - final V v = getValue(); - - return new Entry() { - public Long getKey() { - return k; - } - - public V getValue() { - return v; - } - - public V setValue(final V value) { - return Long2ObjectHashMap.this.put(k, value); - } - - public int hashCode() { - return Hashing.hashCode(getLongKey()) ^ (v != null ? 
v.hashCode() : 0); - } - - public boolean equals(final Object o) { - if (!(o instanceof Entry)) { - return false; - } - - final Entry e = (Entry) o; - - return (e.getKey() != null && e.getKey().equals(k)) - && ((e.getValue() == null && v == null) || e.getValue().equals(v)); - } - - public String toString() { - return k + "=" + v; - } - }; - } - - public Long getKey() { - return getLongKey(); - } - - public long getLongKey() { - return keys[position()]; - } - - public V getValue() { - return unmapNullValue(values[position()]); - } - - @SuppressWarnings("unchecked") - public V setValue(final V value) { - final V val = (V) mapNullValue(value); - requireNonNull(val, "value cannot be null"); - - if (!this.isPositionValid) { - throw new IllegalStateException(); - } - - final int pos = position(); - final Object oldValue = values[pos]; - values[pos] = val; - - return (V) oldValue; - } - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongHashSet.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongHashSet.java deleted file mode 100644 index 8089d550..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongHashSet.java +++ /dev/null @@ -1,704 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Copyright 2014-2020 Real Logic Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package co.elastic.otel.profiler.collections; - -import static co.elastic.otel.profiler.collections.CollectionUtil.findNextPositivePowerOfTwo; -import static co.elastic.otel.profiler.collections.CollectionUtil.validateLoadFactor; - -import java.io.Serializable; -import java.lang.reflect.Array; -import java.util.AbstractSet; -import java.util.Arrays; -import java.util.Collection; -import java.util.Iterator; -import java.util.NoSuchElementException; -import java.util.Set; - -/** - * Open-addressing with linear-probing expandable hash set. Allocation free in steady state use when - * expanded. - * - *

By storing elements as long primitives this significantly reduces memory consumption compared - * with Java's builtin HashSet<Long>. It implements Set<Long> - * for convenience, but calling functionality via those methods can add boxing overhead to your - * usage. - * - *
This class is not Threadsafe. - * - *
This HashSet caches its iterator object by default, so nested iteration is not supported. You - * can override this behaviour at construction by indicating that the iterator should not be cached. - * - * @see LongIterator - * @see Set - */ -public class LongHashSet extends AbstractSet implements Serializable { - /** The initial capacity used when none is specified in the constructor. */ - public static final int DEFAULT_INITIAL_CAPACITY = 8; - - static final long MISSING_VALUE = -1; - - private final boolean shouldAvoidAllocation; - private boolean containsMissingValue; - private final float loadFactor; - private int resizeThreshold; - // NB: excludes missing value - private int sizeOfArrayValues; - - private long[] values; - private LongIterator iterator; - - /** - * Construct a hash set with {@link #DEFAULT_INITIAL_CAPACITY}, {@link - * Hashing#DEFAULT_LOAD_FACTOR}, and iterator caching support. - */ - public LongHashSet() { - this(DEFAULT_INITIAL_CAPACITY); - } - - /** - * Construct a hash set with a proposed capacity, {@link Hashing#DEFAULT_LOAD_FACTOR}, and - * iterator caching support. - * - * @param proposedCapacity for the initial capacity of the set. - */ - public LongHashSet(final int proposedCapacity) { - this(proposedCapacity, Hashing.DEFAULT_LOAD_FACTOR, true); - } - - /** - * Construct a hash set with a proposed initial capacity, load factor, and iterator caching - * support. - * - * @param proposedCapacity for the initial capacity of the set. - * @param loadFactor to be used for resizing. - */ - public LongHashSet(final int proposedCapacity, final float loadFactor) { - this(proposedCapacity, loadFactor, true); - } - - /** - * Construct a hash set with a proposed initial capacity, load factor, and indicated iterator - * caching support. - * - * @param proposedCapacity for the initial capacity of the set. - * @param loadFactor to be used for resizing. - * @param shouldAvoidAllocation should the iterator be cached to avoid further allocation. - */ - public LongHashSet( - final int proposedCapacity, final float loadFactor, final boolean shouldAvoidAllocation) { - validateLoadFactor(loadFactor); - - this.shouldAvoidAllocation = shouldAvoidAllocation; - this.loadFactor = loadFactor; - sizeOfArrayValues = 0; - final int capacity = - findNextPositivePowerOfTwo(Math.max(DEFAULT_INITIAL_CAPACITY, proposedCapacity)); - resizeThreshold = (int) (capacity * loadFactor); // @DoNotSub - values = new long[capacity]; - Arrays.fill(values, MISSING_VALUE); - } - - /** - * Get the load factor beyond which the set will increase size. - * - * @return load factor for when the set should increase size. - */ - public float loadFactor() { - return loadFactor; - } - - /** - * Get the total capacity for the set to which the load factor with be a fraction of. - * - * @return the total capacity for the set. - */ - public int capacity() { - return values.length; - } - - /** - * Get the actual threshold which when reached the map will resize. This is a function of the - * current capacity and load factor. - * - * @return the threshold when the map will resize. 
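For reference, a minimal usage sketch of the LongHashSet deleted here, using only methods shown in the deleted source. Names and values are illustrative; note that -1 is reserved as MISSING_VALUE and is tracked by a separate flag rather than stored in the array.

    import co.elastic.otel.profiler.collections.LongHashSet;

    public class LongHashSetSketch {
      public static void main(String[] args) {
        LongHashSet seenThreadIds = new LongHashSet();   // default capacity 8, cached iterator

        seenThreadIds.add(101L);
        seenThreadIds.add(102L);
        boolean changed = seenThreadIds.add(101L);       // false, already present

        boolean hasIt = seenThreadIds.contains(102L);    // primitive overload, no boxing

        // iterator() hands back the cached LongIterator, so iterations must not be nested.
        for (LongHashSet.LongIterator it = seenThreadIds.iterator(); it.hasNext(); ) {
          long id = it.nextValue();                      // unboxed access
          System.out.println("sampled thread " + id);
        }

        // difference() is null when both sets hold exactly the same values (the garbage-free case).
        LongHashSet other = new LongHashSet();
        other.add(101L);
        other.add(102L);
        System.out.println(changed + " " + hasIt + " diff=" + seenThreadIds.difference(other));
      }
    }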
- */ - public int resizeThreshold() { - return resizeThreshold; - } - - /** {@inheritDoc} */ - public boolean add(final Long value) { - return add(value.longValue()); - } - - /** - * Primitive specialised overload of {this#add(Long)} - * - * @param value the value to add - * @return true if the collection has changed, false otherwise - * @throws IllegalArgumentException if value is missingValue - */ - public boolean add(final long value) { - if (value == MISSING_VALUE) { - final boolean previousContainsMissingValue = this.containsMissingValue; - containsMissingValue = true; - return !previousContainsMissingValue; - } - - final long[] values = this.values; - final int mask = values.length - 1; - int index = Hashing.hash(value, mask); - - while (values[index] != MISSING_VALUE) { - if (values[index] == value) { - return false; - } - - index = next(index, mask); - } - - values[index] = value; - sizeOfArrayValues++; - - if (sizeOfArrayValues > resizeThreshold) { - increaseCapacity(); - } - - return true; - } - - private void increaseCapacity() { - final int newCapacity = values.length * 2; - if (newCapacity < 0) { - throw new IllegalStateException("max capacity reached at size=" + size()); - } - - rehash(newCapacity); - } - - private void rehash(final int newCapacity) { - final int capacity = newCapacity; - final int mask = newCapacity - 1; - resizeThreshold = (int) (newCapacity * loadFactor); // @DoNotSub - - final long[] tempValues = new long[capacity]; - Arrays.fill(tempValues, MISSING_VALUE); - - for (final long value : values) { - if (value != MISSING_VALUE) { - int newHash = Hashing.hash(value, mask); - while (tempValues[newHash] != MISSING_VALUE) { - newHash = ++newHash & mask; - } - - tempValues[newHash] = value; - } - } - - values = tempValues; - } - - /** {@inheritDoc} */ - public boolean remove(final Object value) { - return value instanceof Long && remove(((Long) value).longValue()); - } - - /** - * An long specialised version of {this#remove(Object)}. - * - * @param value the value to remove - * @return true if the value was present, false otherwise - */ - public boolean remove(final long value) { - if (value == MISSING_VALUE) { - final boolean previousContainsMissingValue = this.containsMissingValue; - containsMissingValue = false; - return previousContainsMissingValue; - } - - final long[] values = this.values; - final int mask = values.length - 1; - int index = Hashing.hash(value, mask); - - while (values[index] != MISSING_VALUE) { - if (values[index] == value) { - values[index] = MISSING_VALUE; - compactChain(index); - sizeOfArrayValues--; - return true; - } - - index = next(index, mask); - } - - return false; - } - - private static int next(final int index, final int mask) { - return (index + 1) & mask; - } - - @SuppressWarnings("FinalParameters") - void compactChain(int deleteIndex) { - final long[] values = this.values; - final int mask = values.length - 1; - - int index = deleteIndex; - while (true) { - index = next(index, mask); - if (values[index] == MISSING_VALUE) { - return; - } - - final int hash = Hashing.hash(values[index], mask); - - if ((index < hash && (hash <= deleteIndex || deleteIndex <= index)) - || (hash <= deleteIndex && deleteIndex <= index)) { - values[deleteIndex] = values[index]; - - values[index] = MISSING_VALUE; - deleteIndex = index; - } - } - } - - /** - * Compact the backing arrays by rehashing with a capacity just larger than current size and - * giving consideration to the load factor. 
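A small worked example of the rehash target that compact() computes from the description above. The load factor of 0.65 and the entry count are invented for illustration and are not the library defaults.

    public class CompactTargetSketch {
      public static void main(String[] args) {
        int size = 1000;            // live entries, illustrative
        double loadFactor = 0.65;   // illustrative, not Hashing.DEFAULT_LOAD_FACTOR

        int idealCapacity = (int) Math.round(size * (1.0 / loadFactor));   // 1538
        // Next power of two >= idealCapacity, which is what the rehash rounds up to here.
        int newCapacity =
            1 << (Integer.SIZE - Integer.numberOfLeadingZeros(idealCapacity - 1)); // 2048
        int newResizeThreshold = (int) (newCapacity * loadFactor);         // 1331

        System.out.println(idealCapacity + " -> capacity " + newCapacity
            + ", next resize at " + newResizeThreshold + " entries");
      }
    }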
- */ - public void compact() { - final int idealCapacity = (int) Math.round(size() * (1.0 / loadFactor)); - rehash(findNextPositivePowerOfTwo(Math.max(DEFAULT_INITIAL_CAPACITY, idealCapacity))); - } - - /** {@inheritDoc} */ - public boolean contains(final Object value) { - return value instanceof Long && contains(((Long) value).longValue()); - } - - /** - * Contains method that does not box values. - * - * @param value to be check for if the set contains it. - * @return true if the value is contained in the set otherwise false. - * @see Collection#contains(Object) - */ - public boolean contains(final long value) { - if (value == MISSING_VALUE) { - return containsMissingValue; - } - - final long[] values = this.values; - final int mask = values.length - 1; - int index = Hashing.hash(value, mask); - - while (values[index] != MISSING_VALUE) { - if (values[index] == value) { - return true; - } - - index = next(index, mask); - } - - return false; - } - - /** {@inheritDoc} */ - public int size() { - return sizeOfArrayValues + (containsMissingValue ? 1 : 0); - } - - /** {@inheritDoc} */ - public boolean isEmpty() { - return size() == 0; - } - - /** {@inheritDoc} */ - public void clear() { - if (size() > 0) { - Arrays.fill(values, MISSING_VALUE); - sizeOfArrayValues = 0; - containsMissingValue = false; - } - } - - /** {@inheritDoc} */ - public boolean addAll(final Collection coll) { - boolean added = false; - - for (final Long value : coll) { - added |= add(value); - } - - return added; - } - - /** - * Alias for {@link #addAll(Collection)} for the specialized case when adding another LongHashSet, - * avoids boxing and allocations - * - * @param coll containing the values to be added. - * @return {@code true} if this set changed as a result of the call - */ - public boolean addAll(final LongHashSet coll) { - boolean acc = false; - - for (final long value : coll.values) { - if (value != MISSING_VALUE) { - acc |= add(value); - } - } - - if (coll.containsMissingValue) { - acc |= add(MISSING_VALUE); - } - - return acc; - } - - /** - * LongHashSet specialised variant of {this#containsAll(Collection)}. - * - * @param other long hash set to compare against. - * @return true if every element in other is in this. - */ - public boolean containsAll(final LongHashSet other) { - for (final long value : other.values) { - if (value != MISSING_VALUE && !contains(value)) { - return false; - } - } - - return !other.containsMissingValue || this.containsMissingValue; - } - - /** - * Fast Path set difference for comparison with another LongHashSet. - * - *
Note: garbage free in the identical case, allocates otherwise. - * - * @param other the other set to subtract - * @return null if identical, otherwise the set of differences - */ - public LongHashSet difference(final LongHashSet other) { - LongHashSet difference = null; - - for (final long value : values) { - if (value != MISSING_VALUE && !other.contains(value)) { - if (difference == null) { - difference = new LongHashSet(); - } - - difference.add(value); - } - } - - if (other.containsMissingValue && !this.containsMissingValue) { - if (difference == null) { - difference = new LongHashSet(); - } - - difference.add(MISSING_VALUE); - } - - return difference; - } - - /** {@inheritDoc} */ - public boolean removeAll(final Collection coll) { - boolean removed = false; - - for (final Object value : coll) { - removed |= remove(value); - } - - return removed; - } - - /** - * Alias for {@link #removeAll(Collection)} for the specialized case when removing another - * LongHashSet, avoids boxing and allocations - * - * @param coll containing the values to be removed. - * @return {@code true} if this set changed as a result of the call - */ - public boolean removeAll(final LongHashSet coll) { - boolean acc = false; - - for (final long value : coll.values) { - if (value != MISSING_VALUE) { - acc |= remove(value); - } - } - - if (coll.containsMissingValue) { - acc |= remove(MISSING_VALUE); - } - - return acc; - } - - /** {@inheritDoc} */ - public LongIterator iterator() { - LongIterator iterator = this.iterator; - if (null == iterator) { - iterator = new LongIterator(); - if (shouldAvoidAllocation) { - this.iterator = iterator; - } - } - - return iterator.reset(); - } - - public void copy(final LongHashSet that) { - if (this.values.length != that.values.length) { - throw new IllegalArgumentException("cannot copy object: masks not equal"); - } - - System.arraycopy(that.values, 0, this.values, 0, this.values.length); - this.sizeOfArrayValues = that.sizeOfArrayValues; - this.containsMissingValue = that.containsMissingValue; - } - - /** {@inheritDoc} */ - public String toString() { - final StringBuilder sb = new StringBuilder(); - sb.append('{'); - - for (final long value : values) { - if (value != MISSING_VALUE) { - sb.append(value).append(", "); - } - } - - if (containsMissingValue) { - sb.append(MISSING_VALUE).append(", "); - } - - if (sb.length() > 1) { - sb.setLength(sb.length() - 2); - } - - sb.append('}'); - - return sb.toString(); - } - - /** {@inheritDoc} */ - @SuppressWarnings("unchecked") - public T[] toArray(final T[] a) { - final Class componentType = a.getClass().getComponentType(); - if (!componentType.isAssignableFrom(Long.class)) { - throw new ArrayStoreException("cannot store Longs in array of type " + componentType); - } - - final int size = size(); - final T[] arrayCopy = a.length >= size ? 
a : (T[]) Array.newInstance(componentType, size); - copyValues(arrayCopy); - - return arrayCopy; - } - - /** {@inheritDoc} */ - public Object[] toArray() { - final Object[] arrayCopy = new Object[size()]; - copyValues(arrayCopy); - - return arrayCopy; - } - - private void copyValues(final Object[] arrayCopy) { - int i = 0; - final long[] values = this.values; - for (final long value : values) { - if (MISSING_VALUE != value) { - arrayCopy[i++] = value; - } - } - - if (containsMissingValue) { - arrayCopy[sizeOfArrayValues] = MISSING_VALUE; - } - } - - /** {@inheritDoc} */ - public boolean equals(final Object other) { - if (other == this) { - return true; - } - - if (other instanceof LongHashSet) { - final LongHashSet otherSet = (LongHashSet) other; - - return otherSet.containsMissingValue == containsMissingValue - && otherSet.sizeOfArrayValues == sizeOfArrayValues - && containsAll(otherSet); - } - - if (!(other instanceof Set)) { - return false; - } - - final Set c = (Set) other; - if (c.size() != size()) { - return false; - } - - try { - return containsAll(c); - } catch (final ClassCastException | NullPointerException ignore) { - return false; - } - } - - /** {@inheritDoc} */ - public int hashCode() { - int hashCode = 0; - for (final long value : values) { - if (value != MISSING_VALUE) { - hashCode += Hashing.hash(value); - } - } - - if (containsMissingValue) { - hashCode += Hashing.hash(MISSING_VALUE); - } - - return hashCode; - } - - /** Iterator which supports unboxed access to values. */ - public final class LongIterator implements Iterator, Serializable { - private int remaining; - private int positionCounter; - private int stopCounter; - private boolean isPositionValid = false; - - LongIterator reset() { - remaining = size(); - - final long[] values = LongHashSet.this.values; - final int length = values.length; - int i = length; - - if (values[length - 1] != LongHashSet.MISSING_VALUE) { - for (i = 0; i < length; i++) { - if (values[i] == LongHashSet.MISSING_VALUE) { - break; - } - } - } - - stopCounter = i; - positionCounter = i + length; - isPositionValid = false; - - return this; - } - - public boolean hasNext() { - return remaining > 0; - } - - public int remaining() { - return remaining; - } - - public Long next() { - return nextValue(); - } - - /** - * Strongly typed alternative of {@link Iterator#next()} to avoid boxing. - * - * @return the next long value. 
- */ - public long nextValue() { - if (remaining == 1 && containsMissingValue) { - remaining = 0; - isPositionValid = true; - - return LongHashSet.MISSING_VALUE; - } - - findNext(); - - final long[] values = LongHashSet.this.values; - - return values[position(values)]; - } - - public void remove() { - if (isPositionValid) { - if (0 == remaining && containsMissingValue) { - containsMissingValue = false; - } else { - final long[] values = LongHashSet.this.values; - final int position = position(values); - values[position] = MISSING_VALUE; - --sizeOfArrayValues; - - compactChain(position); - } - - isPositionValid = false; - } else { - throw new IllegalStateException(); - } - } - - private void findNext() { - final long[] values = LongHashSet.this.values; - final int mask = values.length - 1; - isPositionValid = true; - - for (int i = positionCounter - 1; i >= stopCounter; i--) { - final int index = i & mask; - if (values[index] != LongHashSet.MISSING_VALUE) { - positionCounter = i; - --remaining; - return; - } - } - - isPositionValid = false; - throw new NoSuchElementException(); - } - - private int position(final long[] values) { - return positionCounter & (values.length - 1); - } - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongList.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongList.java deleted file mode 100644 index f5f0b158..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongList.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Copyright 2014-2020 Real Logic Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package co.elastic.otel.profiler.collections; - -import java.util.Arrays; - -public class LongList { - private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; - private static final int DEFAULT_CAPACITY = 16; - private long[] longs; - private int size; - - public LongList() { - this(DEFAULT_CAPACITY); - } - - public LongList(int initialCapacity) { - longs = new long[initialCapacity]; - } - - public static LongList of(long... 
values) { - LongList list = new LongList(values.length); - for (long value : values) { - list.add(value); - } - return list; - } - - public void add(long l) { - ensureCapacity(size + 1); - longs[size++] = l; - } - - public void addAll(LongList other) { - ensureCapacity(size + other.size); - System.arraycopy(other.longs, 0, longs, size, other.size); - size += other.size; - } - - private void ensureCapacity(long minCapacity) { - if (longs.length < minCapacity) { - longs = Arrays.copyOf(longs, newCapacity(minCapacity, longs.length)); - } - } - - static int newCapacity(long minCapacity, long oldCapacity) { - long growBy50Percent = oldCapacity + (oldCapacity >> 1); - if (minCapacity <= growBy50Percent) { - return (int) growBy50Percent; - } else if (minCapacity <= MAX_ARRAY_SIZE) { - return (int) minCapacity; - } else { - throw new OutOfMemoryError(); - } - } - - public int getSize() { - return size; - } - - public long get(int i) { - if (i >= size) { - throw new IndexOutOfBoundsException(); - } - return longs[i]; - } - - public boolean contains(long l) { - for (int i = 0; i < size; i++) { - if (longs[i] == l) { - return true; - } - } - return false; - } - - public boolean remove(long l) { - for (int i = size - 1; i >= 0; i--) { - if (longs[i] == l) { - remove(i); - return true; - } - } - return false; - } - - public long remove(int i) { - long previousValue = get(i); - size--; - if (size > i) { - System.arraycopy(longs, i + 1, longs, i, size - i); - } - longs[size] = 0; - return previousValue; - } - - public void clear() { - Arrays.fill(longs, 0); - size = 0; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append('['); - for (int i = 0; i < size; i++) { - if (i > 0) { - sb.append(','); - } - sb.append(longs[i]); - } - sb.append(']'); - return sb.toString(); - } - - public long[] toArray() { - return Arrays.copyOfRange(longs, 0, size); - } - - public boolean isEmpty() { - return size == 0; - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongLongConsumer.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongLongConsumer.java deleted file mode 100644 index d044e1df..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/LongLongConsumer.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Copyright 2014-2020 Real Logic Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
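Note on the removed LongList above: it is a growable primitive long list that avoids boxing, growing its backing array by 50% (capped at MAX_ARRAY_SIZE) and shifting elements on removal. A minimal usage sketch against the API as it existed before this patch (class and package names as in the deleted file; the example values are illustrative):

    import co.elastic.otel.profiler.collections.LongList;

    public class LongListExample {
      public static void main(String[] args) {
        LongList ids = LongList.of(1L, 2L, 3L);   // bulk construction from varargs
        ids.add(42L);                             // appends, growing the backing array if needed

        LongList more = new LongList(4);          // explicit initial capacity
        more.add(7L);
        ids.addAll(more);                         // bulk copy via System.arraycopy

        assert ids.getSize() == 5;
        assert ids.get(3) == 42L;                 // index-based access without boxing
        assert ids.contains(7L);

        ids.remove(42L);                          // removal by value shifts later elements left
        long[] snapshot = ids.toArray();          // copy of the live elements only
        assert snapshot.length == 4;
      }
    }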
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package co.elastic.otel.profiler.collections; - -/** This is an (long, long) primitive specialisation of a BiConsumer */ -@FunctionalInterface -public interface LongLongConsumer { - /** - * Accept two values that comes as a tuple of longs. - * - * @param valueOne for the tuple. - * @param valueTwo for the tuple. - */ - void accept(long valueOne, long valueTwo); -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/package-info.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/package-info.java deleted file mode 100644 index 5df9446a..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/collections/package-info.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/** - * Copied from - * https://github.com/real-logic/agrona/tree/master/agrona/src/main/java/org/agrona/collections, - * which is under Apache License 2.0. - * - *
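The LongLongConsumer interface above is the (long, long) primitive specialisation of BiConsumer, so key/value pairs can be consumed without allocating Long wrappers. A trivial sketch (the accumulator variable is illustrative):

    import co.elastic.otel.profiler.collections.LongLongConsumer;

    public class LongLongConsumerExample {
      public static void main(String[] args) {
        long[] sum = new long[1];
        // A lambda implements the single accept(long, long) method; no boxing occurs.
        LongLongConsumer add = (valueOne, valueTwo) -> sum[0] += valueOne + valueTwo;
        add.accept(3L, 4L);
        assert sum[0] == 7L;
      }
    }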
<p>
We can't use agrona as a regular dependency as it's compiled for Java 8 and we still support - * Java 7. That's why the relevant classes are copied over and methods referencing Java 8 types are - * removed. - */ -package co.elastic.otel.profiler.collections; diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/AbstractObjectPool.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/AbstractObjectPool.java deleted file mode 100644 index b39914b2..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/AbstractObjectPool.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler.pooling; - -import java.util.concurrent.atomic.AtomicInteger; -import javax.annotation.Nullable; - -public abstract class AbstractObjectPool implements ObjectPool { - - protected final Allocator allocator; - protected final Resetter resetter; - private final AtomicInteger garbageCreated; - - protected AbstractObjectPool(Allocator allocator, Resetter resetter) { - this.allocator = allocator; - this.resetter = resetter; - this.garbageCreated = new AtomicInteger(); - } - - @Override - public final T createInstance() { - T object = tryCreateInstance(); - if (object == null) { - // pool does not have available instance, falling back to creating a new one - object = allocator.createInstance(); - } - return object; - } - - @Override - public final void recycle(T obj) { - resetter.recycle(obj); - if (!returnToPool(obj)) { - // when not able to return object to pool, it means this object will be garbage-collected - garbageCreated.incrementAndGet(); - } - } - - public final long getGarbageCreated() { - return garbageCreated.longValue(); - } - - /** - * Pushes object reference back into the available pooled instances - * - * @param obj recycled object to return to pool - * @return true if object has been returned to pool, false if pool is already full - */ - protected abstract boolean returnToPool(T obj); - - /** - * Tries to create an instance in pool - * - * @return {@code null} if pool capacity is exhausted - */ - @Nullable - protected abstract T tryCreateInstance(); -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Allocator.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Allocator.java deleted file mode 100644 index b9ec01b0..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Allocator.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler.pooling; - -/** - * Defines pooled object factory - * - * @param pooled object type - */ -public interface Allocator { - - /** - * @return new instance of pooled object type - */ - T createInstance(); -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/ObjectPool.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/ObjectPool.java deleted file mode 100644 index ceea2c96..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/ObjectPool.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler.pooling; - -import org.jctools.queues.MpmcArrayQueue; - -/** - * Object pool - * - * @param pooled object type. Does not have to implement {@link Recyclable} in order to allow - * for dealing with objects that are outside of elastic apm agent (like standard JDK or third - * party library classes). - */ -public interface ObjectPool { - - /** - * Tries to reuse any existing instance if pool has any, otherwise creates a new un-pooled - * instance - * - * @return object instance, either from pool or freshly allocated - */ - T createInstance(); - - /** - * Recycles an object - * - * @param obj object to recycle - */ - void recycle(T obj); - - void clear(); - - public static ObjectPool createRecyclable( - int capacity, Allocator allocator) { - return QueueBasedObjectPool.ofRecyclable(new MpmcArrayQueue<>(capacity), false, allocator); - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/QueueBasedObjectPool.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/QueueBasedObjectPool.java deleted file mode 100644 index a5d9b141..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/QueueBasedObjectPool.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler.pooling; - -import java.util.Queue; -import javax.annotation.Nullable; - -public class QueueBasedObjectPool extends AbstractObjectPool { - - private final Queue queue; - - /** - * Creates a queue based pooled for types that implement {@link Recyclable}, use {@link #of(Queue, - * boolean, Allocator, Resetter)} for other pooled object types. - * - * @param queue the underlying queue - * @param preAllocate when set to true, queue will be be pre-allocated with object instance. - * @param allocator a factory used to create new instances of the recyclable object. This factory - * is used when there are no objects in the queue and to preallocate the queue - */ - public static QueueBasedObjectPool ofRecyclable( - Queue queue, boolean preAllocate, Allocator allocator) { - return new QueueBasedObjectPool<>( - queue, preAllocate, allocator, Resetter.ForRecyclable.get()); - } - - /** - * Creates a queue based pooled for types that do not implement {@link Recyclable}, use {@link - * #ofRecyclable(Queue, boolean, Allocator)} for types that implement {@link Recyclable}. - * - * @param queue the underlying queue - * @param preAllocate when set to true, queue will be be pre-allocated with object instances - * fitting queue size - * @param allocator a factory used to create new instances of the recyclable object. This factory - * is used when there are no objects in the queue and to preallocate the queue - * @param resetter a reset strategy class - */ - public static QueueBasedObjectPool of( - Queue queue, - boolean preAllocate, - Allocator allocator, - Resetter resetter) { - return new QueueBasedObjectPool<>(queue, preAllocate, allocator, resetter); - } - - private QueueBasedObjectPool( - Queue queue, - boolean preAllocate, - Allocator allocator, - Resetter resetter) { - super(allocator, resetter); - this.queue = queue; - if (preAllocate) { - boolean addMore; - do { - addMore = queue.offer(allocator.createInstance()); - } while (addMore); - } - } - - @Nullable - @Override - public T tryCreateInstance() { - return queue.poll(); - } - - @Override - protected boolean returnToPool(T obj) { - return queue.offer(obj); - } - - public int getObjectsInPool() { - // as the size of the ring buffer is an int, this can never overflow - return queue.size(); - } - - @Override - public void clear() { - queue.clear(); - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Recyclable.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Recyclable.java deleted file mode 100644 index 8743ef0b..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Recyclable.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler.pooling; - -public interface Recyclable { - - /** resets pooled object state so it can be reused */ - void resetState(); -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Resetter.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Resetter.java deleted file mode 100644 index c3088c25..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/pooling/Resetter.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler.pooling; - -/** - * Defines reset strategy to use for a given pooled object type when they are returned to pool - * - * @param pooled object type - */ -public interface Resetter { - - /** - * Recycles a pooled object state - * - * @param object object to recycle - */ - void recycle(T object); - - /** - * Resetter for objects that implement {@link Recyclable} - * - * @param recyclable object type - */ - @SuppressWarnings({"unchecked", "rawtypes"}) - class ForRecyclable implements Resetter { - private static final ForRecyclable INSTANCE = new ForRecyclable(); - - public static Resetter get() { - return INSTANCE; - } - - @Override - public void recycle(Recyclable object) { - object.resetState(); - } - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/util/ByteUtils.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/util/ByteUtils.java deleted file mode 100644 index b3e7b939..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/util/ByteUtils.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
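Taken together, the pooling classes removed above implement a recycle-or-allocate pool: createInstance() polls the underlying queue and falls back to the Allocator when the queue is empty, while recycle() runs the Resetter (resetState() for Recyclable types) and re-queues the instance, counting it as garbage when the queue is full. A minimal usage sketch; the generic type parameters appear stripped in the hunks above, so the sketch assumes the generic signatures of the original sources, and PooledBuffer is an illustrative type, not part of the codebase:

    import co.elastic.otel.profiler.pooling.ObjectPool;
    import co.elastic.otel.profiler.pooling.Recyclable;

    public class PoolExample {
      // Illustrative pooled type: implements Recyclable so the pool can reset it on recycle().
      static final class PooledBuffer implements Recyclable {
        final StringBuilder data = new StringBuilder();

        @Override
        public void resetState() {
          data.setLength(0); // clear state before the instance is handed out again
        }
      }

      public static void main(String[] args) {
        // Backed by an MpmcArrayQueue of the given capacity, without pre-allocation.
        ObjectPool<PooledBuffer> pool = ObjectPool.createRecyclable(8, PooledBuffer::new);

        PooledBuffer buf = pool.createInstance();  // reused from the queue, or freshly allocated
        buf.data.append("hello");
        pool.recycle(buf);                         // resetState() runs, then the instance is re-queued

        PooledBuffer again = pool.createInstance();
        assert again.data.length() == 0;           // state was cleared before reuse
      }
    }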
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler.util; - -public class ByteUtils { - public static void putLong(byte[] buffer, int offset, long l) { - buffer[offset++] = (byte) (l >> 56); - buffer[offset++] = (byte) (l >> 48); - buffer[offset++] = (byte) (l >> 40); - buffer[offset++] = (byte) (l >> 32); - buffer[offset++] = (byte) (l >> 24); - buffer[offset++] = (byte) (l >> 16); - buffer[offset++] = (byte) (l >> 8); - buffer[offset] = (byte) l; - } - - public static long getLong(byte[] buffer, int offset) { - return ((long) buffer[offset] << 56) - | ((long) buffer[offset + 1] & 0xff) << 48 - | ((long) buffer[offset + 2] & 0xff) << 40 - | ((long) buffer[offset + 3] & 0xff) << 32 - | ((long) buffer[offset + 4] & 0xff) << 24 - | ((long) buffer[offset + 5] & 0xff) << 16 - | ((long) buffer[offset + 6] & 0xff) << 8 - | ((long) buffer[offset + 7] & 0xff); - } -} diff --git a/inferred-spans/src/main/java/co/elastic/otel/profiler/util/ThreadUtils.java b/inferred-spans/src/main/java/co/elastic/otel/profiler/util/ThreadUtils.java deleted file mode 100644 index ba36ec5f..00000000 --- a/inferred-spans/src/main/java/co/elastic/otel/profiler/util/ThreadUtils.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
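ByteUtils above encodes and decodes a long in big-endian order (most significant byte first) at an arbitrary offset of a byte array; getLong masks every byte below the top one with 0xff so sign extension cannot corrupt the reassembled value. A round-trip sketch (buffer size and offset are illustrative):

    import co.elastic.otel.profiler.util.ByteUtils;

    public class ByteUtilsExample {
      public static void main(String[] args) {
        byte[] buffer = new byte[16];
        long value = -1234567890123456789L;

        ByteUtils.putLong(buffer, 4, value);          // big-endian encode at offset 4
        assert buffer[4] == (byte) (value >> 56);     // most significant byte is written first

        long decoded = ByteUtils.getLong(buffer, 4);  // decode from the same offset
        assert decoded == value;                      // negative values round-trip correctly
      }
    }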
- */ -package co.elastic.otel.profiler.util; - -import java.lang.invoke.MethodHandle; -import java.lang.invoke.MethodHandles; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; - -public class ThreadUtils { - - private static final MethodHandle VIRTUAL_CHECKER = generateVirtualChecker(); - - public static boolean isVirtual(Thread thread) { - try { - return (boolean) VIRTUAL_CHECKER.invokeExact(thread); - } catch (Throwable e) { - throw new IllegalStateException("isVirtual is not expected to throw exceptions", e); - } - } - - private static MethodHandle generateVirtualChecker() { - Method isVirtual = null; - try { - isVirtual = Thread.class.getMethod("isVirtual"); - isVirtual.invoke( - Thread.currentThread()); // invoke to ensure it does not throw exceptions for preview - // versions - return MethodHandles.lookup().unreflect(isVirtual); - } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { - // virtual threads are not supported, therefore no thread is virtual - return MethodHandles.dropArguments( - MethodHandles.constant(boolean.class, false), 0, Thread.class); - } - } -} diff --git a/inferred-spans/src/test/java/co/elastic/otel/profiler/InferredSpansAutoConfigTest.java b/inferred-spans/src/test/java/co/elastic/otel/InferredSpansAutoConfigTest.java similarity index 50% rename from inferred-spans/src/test/java/co/elastic/otel/profiler/InferredSpansAutoConfigTest.java rename to inferred-spans/src/test/java/co/elastic/otel/InferredSpansAutoConfigTest.java index e64697c3..94120e78 100644 --- a/inferred-spans/src/test/java/co/elastic/otel/profiler/InferredSpansAutoConfigTest.java +++ b/inferred-spans/src/test/java/co/elastic/otel/InferredSpansAutoConfigTest.java @@ -16,32 +16,29 @@ * specific language governing permissions and limitations * under the License. 
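ThreadUtils above resolves Thread.isVirtual() once via a MethodHandle; on runtimes without virtual-thread support it falls back to a constant handle that always returns false, so the check is safe on any JVM. Usage is a one-liner:

    import co.elastic.otel.profiler.util.ThreadUtils;

    public class ThreadUtilsExample {
      public static void main(String[] args) {
        // false on JVMs without Thread.isVirtual(); true only for virtual threads on newer runtimes
        boolean virtual = ThreadUtils.isVirtual(Thread.currentThread());
        System.out.println("current thread virtual: " + virtual);
      }
    }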
*/ -package co.elastic.otel.profiler; +package co.elastic.otel; import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; -import static org.awaitility.Awaitility.await; -import co.elastic.otel.common.config.WildcardMatcher; import co.elastic.otel.testing.AutoConfigTestProperties; -import co.elastic.otel.testing.AutoConfiguredDataCapture; import co.elastic.otel.testing.DisabledOnOpenJ9; import co.elastic.otel.testing.OtelReflectionUtils; import io.opentelemetry.api.GlobalOpenTelemetry; import io.opentelemetry.api.OpenTelemetry; -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.api.trace.Tracer; -import io.opentelemetry.context.Scope; +import io.opentelemetry.contrib.inferredspans.InferredSpansProcessor; +import io.opentelemetry.contrib.inferredspans.WildcardMatcher; +import io.opentelemetry.contrib.inferredspans.internal.InferredSpansConfiguration; +import io.opentelemetry.contrib.inferredspans.internal.ProfilingActivationListener; +import io.opentelemetry.contrib.inferredspans.internal.SamplingProfiler; import io.opentelemetry.sdk.trace.SpanProcessor; +import java.lang.reflect.Field; import java.nio.file.Path; import java.time.Duration; import java.util.List; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.DisabledOnOs; -import org.junit.jupiter.api.condition.OS; import org.junit.jupiter.api.io.TempDir; public class InferredSpansAutoConfigTest { @@ -55,22 +52,22 @@ public void resetGlobalOtel() { @Test @DisabledOnOpenJ9 - public void checkAllOptions(@TempDir Path tmpDir) { + public void checkAllLegacyOptions(@TempDir Path tmpDir) { String libDir = tmpDir.resolve("foo").resolve("bar").toString(); try (AutoConfigTestProperties props = new AutoConfigTestProperties() - .put(InferredSpansAutoConfig.ENABLED_OPTION, "true") - .put(InferredSpansAutoConfig.LOGGING_OPTION, "false") - .put(InferredSpansAutoConfig.DIAGNOSTIC_FILES_OPTION, "true") - .put(InferredSpansAutoConfig.SAFEMODE_OPTION, "16") - .put(InferredSpansAutoConfig.POSTPROCESSING_OPTION, "false") - .put(InferredSpansAutoConfig.SAMPLING_INTERVAL_OPTION, "7ms") - .put(InferredSpansAutoConfig.MIN_DURATION_OPTION, "2ms") - .put(InferredSpansAutoConfig.INCLUDED_CLASSES_OPTION, "foo*23,bar.baz") - .put(InferredSpansAutoConfig.EXCLUDED_CLASSES_OPTION, "blub,test*.test2") - .put(InferredSpansAutoConfig.INTERVAL_OPTION, "2s") - .put(InferredSpansAutoConfig.DURATION_OPTION, "3s") - .put(InferredSpansAutoConfig.LIB_DIRECTORY_OPTION, libDir)) { + .put("elastic.otel.inferred.spans.enabled", "true") + .put("elastic.otel.inferred.spans.logging.enabled", "false") + .put("elastic.otel.inferred.spans.backup.diagnostic.files", "true") + .put("elastic.otel.inferred.spans.safe.mode", "16") + .put("elastic.otel.inferred.spans.post.processing.enabled", "false") + .put("elastic.otel.inferred.spans.sampling.interval", "7ms") + .put("elastic.otel.inferred.spans.min.duration", "2ms") + .put("elastic.otel.inferred.spans.included.classes", "foo*23,bar.baz") + .put("elastic.otel.inferred.spans.excluded.classes", "blub,test*.test2") + .put("elastic.otel.inferred.spans.interval", "2s") + .put("elastic.otel.inferred.spans.duration", "3s") + .put("elastic.otel.inferred.spans.lib.directory", libDir)) { OpenTelemetry otel = GlobalOpenTelemetry.get(); List processors = OtelReflectionUtils.getSpanProcessors(otel); @@ -82,7 +79,7 @@ public void 
checkAllOptions(@TempDir Path tmpDir) { .findFirst() .get(); - InferredSpansConfiguration config = processor.profiler.config; + InferredSpansConfiguration config = extractProfiler(processor).getConfig(); assertThat(config.isProfilingLoggingEnabled()).isFalse(); assertThat(config.isBackupDiagnosticFiles()).isTrue(); assertThat(config.getAsyncProfilerSafeMode()).isEqualTo(16); @@ -97,71 +94,22 @@ public void checkAllOptions(@TempDir Path tmpDir) { } } - @Test - public void checkDisabledbyDefault() { - try (AutoConfigTestProperties props = new AutoConfigTestProperties()) { - OpenTelemetry otel = GlobalOpenTelemetry.get(); - List processors = OtelReflectionUtils.getSpanProcessors(otel); - assertThat(processors).noneMatch(proc -> proc instanceof InferredSpansProcessor); + private SamplingProfiler extractProfiler(InferredSpansProcessor processor) { + try { + Field profilerField = processor.getClass().getDeclaredField("profiler"); + profilerField.setAccessible(true); + return (SamplingProfiler) profilerField.get(processor); + } catch (Exception e) { + throw new RuntimeException(e); } } - @DisabledOnOpenJ9 - @DisabledOnOs(OS.WINDOWS) @Test - public void checkProfilerWorking() { - try (AutoConfigTestProperties props = - new AutoConfigTestProperties() - .put(InferredSpansAutoConfig.ENABLED_OPTION, "true") - .put(InferredSpansAutoConfig.DURATION_OPTION, "500ms") - .put(InferredSpansAutoConfig.INTERVAL_OPTION, "500ms") - .put(InferredSpansAutoConfig.SAMPLING_INTERVAL_OPTION, "5ms")) { + public void checkDisabledbyDefault() { + try (AutoConfigTestProperties props = new AutoConfigTestProperties()) { OpenTelemetry otel = GlobalOpenTelemetry.get(); List processors = OtelReflectionUtils.getSpanProcessors(otel); - assertThat(processors).filteredOn(proc -> proc instanceof InferredSpansProcessor).hasSize(1); - InferredSpansProcessor processor = - (InferredSpansProcessor) - processors.stream() - .filter(proc -> proc instanceof InferredSpansProcessor) - .findFirst() - .get(); - - // Wait until profiler is started - await() - .pollDelay(10, TimeUnit.MILLISECONDS) - .timeout(6000, TimeUnit.MILLISECONDS) - .until(() -> processor.profiler.getProfilingSessions() > 1); - - Tracer tracer = otel.getTracer("manual-spans"); - - Span tx = tracer.spanBuilder("my-root").startSpan(); - try (Scope scope = tx.makeCurrent()) { - doSleep(); - } finally { - tx.end(); - } - - await() - .untilAsserted( - () -> - assertThat(AutoConfiguredDataCapture.getSpans()) - .hasSizeGreaterThanOrEqualTo(2) - .anySatisfy( - span -> { - assertThat(span.getName()).startsWith("InferredSpansAutoConfigTest#"); - assertThat(span.getInstrumentationScopeInfo().getName()) - .isEqualTo(InferredSpansProcessor.TRACER_NAME); - assertThat(span.getInstrumentationScopeInfo().getVersion()) - .isNotBlank(); - })); - } - } - - private void doSleep() { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - throw new RuntimeException(e); + assertThat(processors).noneMatch(proc -> proc instanceof InferredSpansProcessor); } } diff --git a/inferred-spans/src/test/java/co/elastic/otel/profiler/CallTreeSpanifyTest.java b/inferred-spans/src/test/java/co/elastic/otel/profiler/CallTreeSpanifyTest.java deleted file mode 100644 index ce04652b..00000000 --- a/inferred-spans/src/test/java/co/elastic/otel/profiler/CallTreeSpanifyTest.java +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. 
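The reworked test above drives the upstream io.opentelemetry.contrib.inferredspans processor through the legacy elastic.otel.inferred.spans.* keys and reaches its configuration reflectively through the private profiler field. For manual experiments outside the test harness, the same legacy keys can be supplied as system properties before SDK autoconfiguration runs; a sketch using the option names visible in checkAllLegacyOptions (values are examples only, and only a subset of the keys is shown):

    public class LegacyInferredSpansProperties {
      public static void main(String[] args) {
        System.setProperty("elastic.otel.inferred.spans.enabled", "true");
        System.setProperty("elastic.otel.inferred.spans.sampling.interval", "7ms");
        System.setProperty("elastic.otel.inferred.spans.min.duration", "2ms");
        System.setProperty("elastic.otel.inferred.spans.included.classes", "foo*23,bar.baz");
        System.setProperty("elastic.otel.inferred.spans.excluded.classes", "blub,test*.test2");
        // duration, interval, lib.directory, safe.mode, ... follow the same naming pattern
      }
    }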
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; - -import co.elastic.otel.common.ElasticAttributes; -import co.elastic.otel.profiler.pooling.ObjectPool; -import co.elastic.otel.testing.DisabledOnOpenJ9; -import io.opentelemetry.api.trace.SpanContext; -import io.opentelemetry.api.trace.TraceFlags; -import io.opentelemetry.api.trace.TraceState; -import io.opentelemetry.sdk.OpenTelemetrySdk; -import io.opentelemetry.sdk.OpenTelemetrySdkBuilder; -import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; -import io.opentelemetry.sdk.trace.SdkTracerProvider; -import io.opentelemetry.sdk.trace.data.SpanData; -import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; -import io.opentelemetry.semconv.incubating.CodeIncubatingAttributes; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.DisabledOnOs; -import org.junit.jupiter.api.condition.OS; - -class CallTreeSpanifyTest { - - static { - // we can't reset context storage wrappers between tests, so we msut ensure that it is - // registered before we create ANY Otel instance - ProfilingActivationListener.ensureInitialized(); - } - - @Test - @DisabledOnOs(OS.WINDOWS) - @DisabledOnOpenJ9 - void testSpanification() throws Exception { - FixedClock nanoClock = new FixedClock(); - try (ProfilerTestSetup setup = - ProfilerTestSetup.create( - config -> config.clock(nanoClock).startScheduledProfiling(false))) { - setup.profiler.setProfilingSessionOngoing(true); - CallTree.Root callTree = - CallTreeTest.getCallTree(setup, new String[] {" dd ", " cc ", " bbb ", "aaaaee"}); - assertThat(callTree.spanify(nanoClock, setup.sdk.getTracer("dummy-tracer"))).isEqualTo(4); - assertThat(setup.getSpans()).hasSize(5); - assertThat(setup.getSpans().stream().map(SpanData::getName)) - .containsExactly( - "Call Tree Root", - "CallTreeTest#a", - "CallTreeTest#b", - "CallTreeTest#d", - "CallTreeTest#e"); - - SpanData a = setup.getSpans().get(1); - assertThat(a).hasName("CallTreeTest#a"); - assertThat(a.getEndEpochNanos() - a.getStartEpochNanos()).isEqualTo(30_000_000); - assertThat(a.getAttributes().get(CodeIncubatingAttributes.CODE_STACKTRACE)).isBlank(); - assertThat(a).hasAttribute(ElasticAttributes.IS_INFERRED, true); - - SpanData b = setup.getSpans().get(2); - assertThat(b).hasName("CallTreeTest#b"); - assertThat(b.getEndEpochNanos() - b.getStartEpochNanos()).isEqualTo(20_000_000); - assertThat(b.getAttributes().get(CodeIncubatingAttributes.CODE_STACKTRACE)).isBlank(); - assertThat(b).hasAttribute(ElasticAttributes.IS_INFERRED, true); - - SpanData d = setup.getSpans().get(3); - assertThat(d).hasName("CallTreeTest#d"); - 
assertThat(d.getEndEpochNanos() - d.getStartEpochNanos()).isEqualTo(10_000_000); - assertThat(d.getAttributes().get(CodeIncubatingAttributes.CODE_STACKTRACE)) - .isEqualTo("at " + CallTreeTest.class.getName() + ".c(CallTreeTest.java)"); - assertThat(d).hasAttribute(ElasticAttributes.IS_INFERRED, true); - - SpanData e = setup.getSpans().get(4); - assertThat(e).hasName("CallTreeTest#e"); - assertThat(e.getEndEpochNanos() - e.getStartEpochNanos()).isEqualTo(10_000_000); - assertThat(e.getAttributes().get(CodeIncubatingAttributes.CODE_STACKTRACE)).isBlank(); - assertThat(e).hasAttribute(ElasticAttributes.IS_INFERRED, true); - } - } - - @Test - void testCallTreeWithActiveSpan() { - FixedClock nanoClock = new FixedClock(); - - String traceId = "0af7651916cd43dd8448eb211c80319c"; - String rootSpanId = "b7ad6b7169203331"; - TraceContext rootContext = - TraceContext.fromSpanContextWithZeroClockAnchor( - SpanContext.create( - traceId, rootSpanId, TraceFlags.getSampled(), TraceState.getDefault()), - null); - - ObjectPool rootPool = ObjectPool.createRecyclable(2, CallTree.Root::new); - ObjectPool childPool = ObjectPool.createRecyclable(2, CallTree::new); - - CallTree.Root root = CallTree.createRoot(rootPool, rootContext.serialize(), 0); - root.addStackTrace(Collections.singletonList(StackFrame.of("A", "a")), 0, childPool, 0); - - String childSpanId = "a1b2c3d4e5f64242"; - TraceContext spanContext = - TraceContext.fromSpanContextWithZeroClockAnchor( - SpanContext.create( - traceId, childSpanId, TraceFlags.getSampled(), TraceState.getDefault()), - rootSpanId); - - root.onActivation(spanContext.serialize(), TimeUnit.MILLISECONDS.toNanos(5)); - root.addStackTrace( - Arrays.asList(StackFrame.of("A", "b"), StackFrame.of("A", "a")), - TimeUnit.MILLISECONDS.toNanos(10), - childPool, - 0); - root.addStackTrace( - Arrays.asList(StackFrame.of("A", "b"), StackFrame.of("A", "a")), - TimeUnit.MILLISECONDS.toNanos(20), - childPool, - 0); - root.onDeactivation( - spanContext.serialize(), rootContext.serialize(), TimeUnit.MILLISECONDS.toNanos(25)); - - root.addStackTrace( - Collections.singletonList(StackFrame.of("A", "a")), - TimeUnit.MILLISECONDS.toNanos(30), - childPool, - 0); - root.end(childPool, 0); - - assertThat(root.getCount()).isEqualTo(4); - assertThat(root.getDurationUs()).isEqualTo(30_000); - assertThat(root.getChildren()).hasSize(1); - - CallTree a = root.getLastChild(); - assertThat(a).isNotNull(); - assertThat(a.getFrame().getMethodName()).isEqualTo("a"); - assertThat(a.getCount()).isEqualTo(4); - assertThat(a.getDurationUs()).isEqualTo(30_000); - assertThat(a.getChildren()).hasSize(1); - - CallTree b = a.getLastChild(); - assertThat(b).isNotNull(); - assertThat(b.getFrame().getMethodName()).isEqualTo("b"); - assertThat(b.getCount()).isEqualTo(2); - assertThat(b.getDurationUs()).isEqualTo(10_000); - assertThat(b.getChildren()).isEmpty(); - - InMemorySpanExporter exporter = InMemorySpanExporter.create(); - OpenTelemetrySdkBuilder sdkBuilder = - OpenTelemetrySdk.builder() - .setTracerProvider( - SdkTracerProvider.builder() - .addSpanProcessor(SimpleSpanProcessor.create(exporter)) - .build()); - - try (OpenTelemetrySdk outputSdk = sdkBuilder.build()) { - root.spanify(nanoClock, outputSdk.getTracer("dummy-tracer")); - - List spans = exporter.getFinishedSpanItems(); - assertThat(spans).hasSize(2); - assertThat(spans.get(0)).hasTraceId(traceId).hasParentSpanId(rootSpanId); - assertThat(spans.get(1)).hasTraceId(traceId).hasParentSpanId(childSpanId); - } - } - - @Test - void testSpanWithInvertedActivation() { 
- FixedClock nanoClock = new FixedClock(); - - String traceId = "0af7651916cd43dd8448eb211c80319c"; - String rootSpanId = "77ad6b7169203331"; - TraceContext rootContext = - TraceContext.fromSpanContextWithZeroClockAnchor( - SpanContext.create( - traceId, rootSpanId, TraceFlags.getSampled(), TraceState.getDefault()), - null); - - String childSpanId = "11b2c3d4e5f64242"; - TraceContext childSpanContext = - TraceContext.fromSpanContextWithZeroClockAnchor( - SpanContext.create( - traceId, childSpanId, TraceFlags.getSampled(), TraceState.getDefault()), - rootSpanId); - - ObjectPool rootPool = ObjectPool.createRecyclable(2, CallTree.Root::new); - ObjectPool childPool = ObjectPool.createRecyclable(2, CallTree::new); - - CallTree.Root root = CallTree.createRoot(rootPool, childSpanContext.serialize(), 0); - root.addStackTrace(Collections.singletonList(StackFrame.of("A", "a")), 10_000, childPool, 0); - - root.onActivation(rootContext.serialize(), 20_000); - root.onDeactivation(rootContext.serialize(), childSpanContext.serialize(), 30_000); - - root.addStackTrace(Collections.singletonList(StackFrame.of("A", "a")), 40_000, childPool, 0); - root.end(childPool, 0); - - InMemorySpanExporter exporter = InMemorySpanExporter.create(); - OpenTelemetrySdkBuilder sdkBuilder = - OpenTelemetrySdk.builder() - .setTracerProvider( - SdkTracerProvider.builder() - .addSpanProcessor(SimpleSpanProcessor.create(exporter)) - .build()); - try (OpenTelemetrySdk outputSdk = sdkBuilder.build()) { - root.spanify(nanoClock, outputSdk.getTracer("dummy-tracer")); - - List spans = exporter.getFinishedSpanItems(); - assertThat(spans).hasSize(1); - assertThat(spans.get(0)).hasTraceId(traceId).hasParentSpanId(childSpanId); - // the inferred span should not have any span links because this - // span link would cause a cycle in the trace - assertThat(spans.get(0).getLinks()).isEmpty(); - } - } - - @Test - void testSpanWithNestedActivation() { - FixedClock nanoClock = new FixedClock(); - - String traceId = "0af7651916cd43dd8448eb211c80319c"; - String rootSpanId = "77ad6b7169203331"; - TraceContext rootContext = - TraceContext.fromSpanContextWithZeroClockAnchor( - SpanContext.create( - traceId, rootSpanId, TraceFlags.getSampled(), TraceState.getDefault()), - null); - - ObjectPool rootPool = ObjectPool.createRecyclable(2, CallTree.Root::new); - ObjectPool childPool = ObjectPool.createRecyclable(2, CallTree::new); - - CallTree.Root root = CallTree.createRoot(rootPool, rootContext.serialize(), 0); - root.addStackTrace(Collections.singletonList(StackFrame.of("A", "a")), 10_000, childPool, 0); - - root.onActivation(rootContext.serialize(), 20_000); - root.onDeactivation(rootContext.serialize(), rootContext.serialize(), 30_000); - - root.addStackTrace(Collections.singletonList(StackFrame.of("A", "a")), 40_000, childPool, 0); - root.end(childPool, 0); - - InMemorySpanExporter exporter = InMemorySpanExporter.create(); - OpenTelemetrySdkBuilder sdkBuilder = - OpenTelemetrySdk.builder() - .setTracerProvider( - SdkTracerProvider.builder() - .addSpanProcessor(SimpleSpanProcessor.create(exporter)) - .build()); - try (OpenTelemetrySdk outputSdk = sdkBuilder.build()) { - root.spanify(nanoClock, outputSdk.getTracer("dummy-tracer")); - - List spans = exporter.getFinishedSpanItems(); - assertThat(spans).hasSize(1); - assertThat(spans.get(0)).hasTraceId(traceId).hasParentSpanId(rootSpanId); - // the inferred span should not have any span links because this - // span link would cause a cycle in the trace - assertThat(spans.get(0).getLinks()).isEmpty(); - } 
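The deleted spanification tests above all follow the same verification pattern: build a throwaway OpenTelemetrySdk whose tracer provider exports through SimpleSpanProcessor into an InMemorySpanExporter, exercise the code under test, then assert on getFinishedSpanItems(). A condensed, self-contained sketch of that pattern (the span name is illustrative):

    import io.opentelemetry.api.trace.Tracer;
    import io.opentelemetry.sdk.OpenTelemetrySdk;
    import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter;
    import io.opentelemetry.sdk.trace.SdkTracerProvider;
    import io.opentelemetry.sdk.trace.data.SpanData;
    import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor;
    import java.util.List;

    public class InMemoryExporterPattern {
      public static void main(String[] args) {
        InMemorySpanExporter exporter = InMemorySpanExporter.create();
        try (OpenTelemetrySdk sdk =
            OpenTelemetrySdk.builder()
                .setTracerProvider(
                    SdkTracerProvider.builder()
                        .addSpanProcessor(SimpleSpanProcessor.create(exporter))
                        .build())
                .build()) {
          Tracer tracer = sdk.getTracer("example-tracer");
          tracer.spanBuilder("example-span").startSpan().end(); // code under test goes here

          List<SpanData> spans = exporter.getFinishedSpanItems();
          assert spans.size() == 1;
          assert "example-span".equals(spans.get(0).getName());
        }
      }
    }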
- } -} diff --git a/inferred-spans/src/test/java/co/elastic/otel/profiler/CallTreeTest.java b/inferred-spans/src/test/java/co/elastic/otel/profiler/CallTreeTest.java deleted file mode 100644 index 49bc4cc1..00000000 --- a/inferred-spans/src/test/java/co/elastic/otel/profiler/CallTreeTest.java +++ /dev/null @@ -1,1097 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; -import static java.util.stream.Collectors.toMap; - -import co.elastic.otel.common.ElasticAttributes; -import co.elastic.otel.profiler.pooling.ObjectPool; -import co.elastic.otel.testing.DisabledOnOpenJ9; -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.api.trace.SpanContext; -import io.opentelemetry.api.trace.Tracer; -import io.opentelemetry.context.Context; -import io.opentelemetry.context.Scope; -import io.opentelemetry.sdk.trace.data.LinkData; -import io.opentelemetry.sdk.trace.data.SpanData; -import io.opentelemetry.semconv.incubating.CodeIncubatingAttributes; -import java.io.IOException; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.stream.Collectors; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.DisabledOnOs; -import org.junit.jupiter.api.condition.OS; - -@DisabledOnOs(OS.WINDOWS) -@DisabledOnOpenJ9 -class CallTreeTest { - - private ProfilerTestSetup profilerSetup; - - private FixedClock nanoClock; - - @BeforeEach - void setUp() { - nanoClock = new FixedClock(); - // disable scheduled profiling to not interfere with this test - profilerSetup = - ProfilerTestSetup.create(config -> config.clock(nanoClock).startScheduledProfiling(false)); - profilerSetup.profiler.setProfilingSessionOngoing(true); - } - - @AfterEach - void tearDown() throws IOException { - profilerSetup.close(); - } - - @Test - void testCallTree() { - TraceContext traceContext = new TraceContext(); - CallTree.Root root = - CallTree.createRoot( - ObjectPool.createRecyclable(100, CallTree.Root::new), traceContext.serialize(), 0); - ObjectPool callTreePool = ObjectPool.createRecyclable(100, CallTree::new); - root.addStackTrace(Arrays.asList(StackFrame.of("A", "a")), 0, callTreePool, 0); - root.addStackTrace( - Arrays.asList(StackFrame.of("A", "b"), StackFrame.of("A", "a")), - TimeUnit.MILLISECONDS.toNanos(10), - callTreePool, - 0); - 
root.addStackTrace( - Arrays.asList(StackFrame.of("A", "b"), StackFrame.of("A", "a")), - TimeUnit.MILLISECONDS.toNanos(20), - callTreePool, - 0); - root.addStackTrace( - Arrays.asList(StackFrame.of("A", "a")), TimeUnit.MILLISECONDS.toNanos(30), callTreePool, 0); - root.end(callTreePool, 0); - - System.out.println(root); - - assertThat(root.getCount()).isEqualTo(4); - assertThat(root.getDepth()).isEqualTo(0); - assertThat(root.getChildren()).hasSize(1); - - CallTree a = root.getLastChild(); - assertThat(a).isNotNull(); - assertThat(a.getFrame().getMethodName()).isEqualTo("a"); - assertThat(a.getCount()).isEqualTo(4); - assertThat(a.getChildren()).hasSize(1); - assertThat(a.getDepth()).isEqualTo(1); - assertThat(a.isSuccessor(root)).isTrue(); - - CallTree b = a.getLastChild(); - assertThat(b).isNotNull(); - assertThat(b.getFrame().getMethodName()).isEqualTo("b"); - assertThat(b.getCount()).isEqualTo(2); - assertThat(b.getChildren()).isEmpty(); - assertThat(b.getDepth()).isEqualTo(2); - assertThat(b.isSuccessor(a)).isTrue(); - assertThat(b.isSuccessor(root)).isTrue(); - } - - @Test - void testGiveEmptyChildIdsTo() { - CallTree rich = new CallTree(); - rich.addChildId(42, 0L); - CallTree robinHood = new CallTree(); - CallTree poor = new CallTree(); - - rich.giveLastChildIdTo(robinHood); - robinHood.giveLastChildIdTo(poor); - // list is not null but empty, expecting no exception - robinHood.giveLastChildIdTo(rich); - - assertThat(rich.hasChildIds()).isFalse(); - assertThat(robinHood.hasChildIds()).isFalse(); - assertThat(poor.hasChildIds()).isTrue(); - } - - @Test - void testTwoDistinctInvocationsOfMethodBShouldNotBeFoldedIntoOne() throws Exception { - assertCallTree( - new String[] {" bb bb", "aaaaaa"}, - new Object[][] { - {"a", 6}, - {" b", 2}, - {" b", 2} - }); - } - - @Test - void testBasicCallTree() throws Exception { - assertCallTree( - new String[] {" cc ", " bbb", "aaaa"}, - new Object[][] { - {"a", 4}, - {" b", 3}, - {" c", 2} - }, - new Object[][] { - {"a", 3}, - {" b", 2}, - {" c", 1} - }); - } - - @Test - void testShouldNotCreateInferredSpansForPillarsAndLeafShouldHaveStacktrace() throws Exception { - assertCallTree( - new String[] {" dd ", " cc ", " bb ", "aaaa"}, - new Object[][] { - {"a", 4}, - {" b", 2}, - {" c", 2}, - {" d", 2} - }, - new Object[][] { - {"a", 3}, - {" d", 1, Arrays.asList("c", "b")} - }); - } - - @Test - void testRemoveNodesWithCountOne() throws Exception { - assertCallTree( - new String[] {" b ", "aaa"}, new Object[][] {{"a", 3}}, new Object[][] {{"a", 2}}); - } - - @Test - void testSameTopOfStackDifferentBottom() throws Exception { - assertCallTree( - new String[] {"cccc", "aabb"}, - new Object[][] { - {"a", 2}, - {" c", 2}, - {"b", 2}, - {" c", 2}, - }); - } - - @Test - void testStackTraceWithRecursion() throws Exception { - assertCallTree( - new String[] {"bbccbbcc", "bbbbbbbb", "aaaaaaaa"}, - new Object[][] { - {"a", 8}, - {" b", 8}, - {" b", 2}, - {" c", 2}, - {" b", 2}, - {" c", 2}, - }); - } - - @Test - void testFirstInferredSpanShouldHaveNoStackTrace() throws Exception { - assertCallTree( - new String[] {"bb", "aa"}, - new Object[][] { - {"a", 2}, - {" b", 2}, - }, - new Object[][] { - {"b", 1}, - }); - } - - @Test - void testCallTreeWithSpanActivations() throws Exception { - assertCallTree( - new String[] {" cc ee ", " bbb dd ", " a aaaaaa a ", "1 2 2 1"}, - new Object[][] { - {"a", 8}, - {" b", 3}, - {" c", 2}, - {" d", 2}, - {" e", 2}, - }, - new Object[][] { - {"1", 11}, - {" a", 9}, - {" 2", 7}, - {" b", 2}, - {" c", 1}, - {" e", 1, 
Arrays.asList("d")}, - }); - } - - /* - * [1 ] [1 ] - * [a ] [a ] - * [2 ] ─┐ [b ] - * [b ] │ [c ] - * [c ] └► [2 ] - * [] [] - */ - @Test - void testDeactivationBeforeEnd() throws Exception { - assertCallTree( - new String[] { - " dd ", - " cccc c ", - " bbbb bb ", // <- deactivation for span 2 happens before b and c ends - " a aaaa aa ", // that means b and c must have started before 2 has been activated - "1 2 2 1" // but we saw the first stack trace of b only after the activation of 2 - }, - new Object[][] { - {"a", 7}, - {" b", 6}, - {" c", 5}, - {" d", 2}, - }, - new Object[][] { - {"1", 10}, - {" a", 8}, - {" b", 7}, - {" c", 6}, - {" 2", 5}, - {" d", 1}, - }); - } - - /* - * [1 ] [1 ] - * [a ] [a ] - * [2 ] [3] [b ][3] <- b is supposed to stealChildIdsFom(a) - * [b ] [2 ] however, it should only steal 2, not 3 - */ - @Test - void testDectivationBeforeEnd2() throws Exception { - assertCallTree( - new String[] {" bbbb b ", " a aaaa a a a ", "1 2 2 3 3 1"}, - new Object[][] { - {"a", 8}, - {" b", 5}, - }, - new Object[][] { - {"1", 13}, - {" a", 11}, - {" b", 6}, - {" 2", 5}, - {" 3", 2}, - }); - } - - /* - * [a ] [a ] - * [1] [1] - * [2] [c ] - * [b] [b ] <- b should steal 2 but not 1 from a - * [c] [2] - */ - @Test - void testDectivationBeforeEnd_DontStealChildIdsOfUnrelatedActivations() throws Exception { - Map spans = - assertCallTree( - new String[] {" c c ", " b b ", "a a a aa", " 1 1 2 2 "}, - new Object[][] { - {"a", 5}, - {" b", 2}, - {" c", 2}, - }, - new Object[][] { - {"a", 9}, - {" 1", 2}, - {" c", 3, Arrays.asList("b")}, - {" 2", 2}, - }); - assertThat(spans.get("a").getLinks()) - .hasSize(1) - .anySatisfy( - link -> assertThat(link.getAttributes()).containsEntry("elastic.is_child", true)); - assertThat(spans.get("c").getLinks()) - .hasSize(1) - .anySatisfy( - link -> assertThat(link.getAttributes()).containsEntry("elastic.is_child", true)); - } - - /* - * [a ] [a ] - * [1] [1] - * [2 ] [c ] <- this is an open issue: c should start when 2 starts but starts with 3 starts - * [3] [2 ] - * [c ] [3] - */ - @Test - void testDectivationBeforeEnd_DontStealChildIdsOfUnrelatedActivations_Nested() throws Exception { - Map spans = - assertCallTree( - new String[] {" c c ", " b b ", "a a a aa", " 1 1 23 32 "}, - new Object[][] { - {"a", 5}, - {" b", 2}, - {" c", 2}, - }, - new Object[][] { - {"a", 11}, - {" 1", 2}, - {" c", 4, Arrays.asList("b")}, - {" 2", 4}, - {" 3", 2}, - }); - assertThat(spans.get("a").getLinks()) - .hasSize(1) - .anySatisfy( - link -> assertThat(link.getAttributes()).containsEntry("elastic.is_child", true)); - assertThat(spans.get("c").getLinks()) - .hasSize(1) - .anySatisfy( - link -> assertThat(link.getAttributes()).containsEntry("elastic.is_child", true)); - } - - /* - * [a ] [a ] - * [b[1] - > [b[1] - */ - @Test - void testActivationAfterMethodEnds() throws Exception { - assertCallTree( - new String[] {"bb ", "aa a ", " 1 1"}, - new Object[][] { - {"a", 3}, - {" b", 2}, - }, - new Object[][] { - {"a", 3}, - {" b", 1}, - {" 1", 2} - }); - } - - /* - * [a ] - * [b[1] - */ - @Test - void testActivationBetweenMethods() throws Exception { - assertCallTree( - new String[] {"bb ", "aa a", " 11 "}, - new Object[][] { - {"a", 3}, - {" b", 2}, - }, - new Object[][] { - {"a", 4}, - {" b", 1}, - {" 1", 1}, - }); - } - - /* - * [a ] - * [b[1] - * c - */ - @Test - void testActivationBetweenMethods_AfterFastMethod() throws Exception { - assertCallTree( - new String[] {" c ", "bb ", "aa a", " 11 "}, - new Object[][] { - {"a", 3}, - {" b", 2}, - }, - new Object[][] { - 
{"a", 4}, - {" b", 1}, - {" 1", 1}, - }); - } - - /* - * [a ] - * [b] - * 1 - */ - @Test - void testActivationBetweenFastMethods() throws Exception { - assertCallTree( - new String[] {"c d ", "b b ", "a a a", " 11 22 "}, - new Object[][] { - {"a", 3}, - {" b", 2}, - }, - new Object[][] { - {"a", 6}, - {" b", 3}, - {" 1", 1}, - {" 2", 1}, - }); - } - - /* */ - /* - * [a ] - * [b] [1 [c] - */ - /* - @Test - void testActivationBetweenMethods_WithCommonAncestor() throws Exception { - assertCallTree(new String[]{ - " c f g ", - "bbb e d dd", - "aaa a a aa", - " 11 22 33 " - }, new Object[][] { - {"a", 7}, - {" b", 3}, - {" d", 3}, - }, new Object[][] { - {"a", 12}, - {" b", 2}, - {" 1", 1}, - {" 2", 1}, - {" d", 4}, - {" 3", 1}, - }); - }*/ - - /* - * [a ] - * [1 ] - * [2] - */ - @Test - void testNestedActivation() throws Exception { - assertCallTree( - new String[] {"a a a", " 12 21 "}, - new Object[][] { - {"a", 3}, - }, - new Object[][] { - {"a", 6}, - {" 1", 4}, - {" 2", 2}, - }); - } - - /* - * [1 ] - * [a][2 ] - * [b] [3 ] - * [c] - */ - @Test - void testNestedActivationAfterMethodEnds_RootChangesToC() throws Exception { - Map spans = - assertCallTree( - new String[] {" bbb ", " aaa ccc ", "1 23 321"}, - new Object[][] { - {"a", 3}, - {" b", 3}, - {"c", 3}, - }, - new Object[][] { - {"1", 11}, - {" b", 2, Arrays.asList("a")}, - {" 2", 6}, - {" 3", 4}, - {" c", 2} - }); - - assertThat(spans.get("b").getLinks()).isEmpty(); - } - - /* - * [1 ] - * [a ][3 ] - * [b ] [4 ] - * [2] [c] - */ - @Test - void testRegularActivationFollowedByNestedActivationAfterMethodEnds() throws Exception { - assertCallTree( - new String[] {" d ", " b b b ", " a a a ccc ", "1 2 2 34 431"}, - new Object[][] { - {"a", 3}, - {" b", 3}, - {"c", 3}, - }, - new Object[][] { - {"1", 13}, - {" b", 4, Arrays.asList("a")}, - {" 2", 2}, - {" 3", 6}, - {" 4", 4}, - {" c", 2} - }); - } - - /* - * [1 ] - * [a ] - * [b ][3 ] - * [2] [4 ] - * [c] - */ - @Test - void testNestedActivationAfterMethodEnds_CommonAncestorA() throws Exception { - Map spans = - assertCallTree( - new String[] {" b b b ccc ", " aa a a aaa a ", "1 2 2 34 43 1"}, - new Object[][] { - {"a", 8}, - {" b", 3}, - {" c", 3}, - }, - new Object[][] { - {"1", 15}, - {" a", 13}, - {" b", 4}, - {" 2", 2}, - {" 3", 6}, - {" 4", 4}, - {" c", 2} - }); - - assertThat(spans.get("b").getLinks()) - .hasSize(1) - .anySatisfy( - link -> { - assertThat(link.getAttributes()).containsEntry("elastic.is_child", true); - SpanData expectedSpan = spans.get("2"); - assertThat(link.getSpanContext().getTraceId()).isEqualTo(expectedSpan.getTraceId()); - assertThat(link.getSpanContext().getSpanId()).isEqualTo(expectedSpan.getSpanId()); - }); - - assertThat(spans.get("c").getLinks()).isEmpty(); - - assertThat(spans.get("a").getLinks()) - .hasSize(1) - .anySatisfy( - link -> { - assertThat(link.getAttributes()).containsEntry("elastic.is_child", true); - SpanData expectedSpan = spans.get("3"); - assertThat(link.getSpanContext().getTraceId()).isEqualTo(expectedSpan.getTraceId()); - assertThat(link.getSpanContext().getSpanId()).isEqualTo(expectedSpan.getSpanId()); - }); - } - - /* - * [1 ] - * [a] - * [2 ] - * [b] - * [c] - */ - @Test - void testActivationAfterMethodEnds_RootChangesToB() throws Exception { - assertCallTree( - new String[] {" ccc ", " aaa bbb ", "1 2 21"}, - new Object[][] { - {"a", 3}, - {"b", 3}, - {" c", 3}, - }, - new Object[][] { - {"1", 9}, - {" a", 2}, - {" 2", 4}, - {" c", 2, Arrays.asList("b")} - }); - } - - /* - * [1 ] - * [a] - * [2 ] - * [b] - */ - @Test - void 
testActivationAfterMethodEnds_RootChangesToB2() throws Exception { - assertCallTree( - new String[] {" aaa bbb ", "1 2 21"}, - new Object[][] { - {"a", 3}, - {"b", 3}, - }, - new Object[][] { - {"1", 9}, - {" a", 2}, - {" 2", 4}, - {" b", 2} - }); - } - - /* - * [1] - * [a] - @Test - void testActivationBeforeCallTree() throws Exception { - assertCallTree(new String[]{ - " aaa", - "1 1 " - }, new Object[][] { - {"a", 3}, - }, new Object[][] { - {"a", 3}, - {" 1", 2}, - }); - } */ - - /* - * [1 ] - * [a ] - * [2 ] - * [b] - * [c] - */ - @Test - void testActivationAfterMethodEnds_SameRootDeeperStack() throws Exception { - assertCallTree( - new String[] {" ccc ", " aaa aaa ", "1 2 21"}, - new Object[][] { - {"a", 6}, - {" c", 3}, - }, - new Object[][] { - {"1", 9}, - {" a", 6}, - {" 2", 4}, - {" c", 2} - }); - } - - /* - * [1 ] - * [a ] - * [2 ] - * [b] - */ - @Test - void testActivationBeforeMethodStarts() throws Exception { - assertCallTree( - new String[] {" bbb ", " a aaa a ", "1 2 2 1"}, - new Object[][] { - {"a", 5}, - {" b", 3}, - }, - new Object[][] { - {"1", 8}, - {" a", 6}, - {" 2", 4}, - {" b", 2} - }); - } - - /* - * [1 ] [1 ] - * [a ] [a ] - * [b ] -> [b ] - * [c ] -> [c ] - * [2 ] [2 ] - * [] [] - */ - @Test - void testDectivationAfterEnd() throws Exception { - assertCallTree( - new String[] { - " dd ", - " c ccc ", - " bb bbb ", // <- deactivation for span 2 happens after b ends - " aaa aaa aa ", // that means b must have ended after 2 has been deactivated - "1 2 2 1" // but we saw the last stack trace of b before the deactivation of 2 - }, - new Object[][] { - {"a", 8}, - {" b", 5}, - {" c", 4}, - {" d", 2}, - }, - new Object[][] { - {"1", 11}, - {" a", 9}, - {" b", 6}, - {" c", 5}, - {" 2", 4}, - {" d", 1}, - }); - } - - @Test - void testCallTreeActivationAsParentOfFastSpan() throws Exception { - assertCallTree( - new String[] {" b ", " aa a aa ", "1 2 2 1"}, - new Object[][] {{"a", 5}}, - new Object[][] { - {"1", 8}, - {" a", 6}, - {" 2", 2}, - }); - } - - @Test - void testCallTreeActivationAsChildOfFastSpan() throws Exception { - profilerSetup.close(); - profilerSetup = - ProfilerTestSetup.create( - config -> - config - .inferredSpansMinDuration(Duration.ofMillis(50)) - .clock(nanoClock) - .startScheduledProfiling(false)); - profilerSetup.profiler.setProfilingSessionOngoing(true); - assertCallTree( - new String[] {" c c ", " b b ", " aaa aaa ", "1 22 1"}, - new Object[][] {{"a", 6}}, - new Object[][] { - {"1", 9}, - {" a", 7}, - {" 2", 1}, - }); - } - - @Test - void testCallTreeActivationAsLeaf() throws Exception { - assertCallTree( - new String[] {" aa aa ", "1 22 1"}, - new Object[][] {{"a", 4}}, - new Object[][] { - {"1", 7}, - {" a", 5}, - {" 2", 1}, - }); - } - - @Test - void testCallTreeMultipleActivationsAsLeaf() throws Exception { - assertCallTree( - new String[] {" aa aaa aa ", "1 22 33 1"}, - new Object[][] {{"a", 7}}, - new Object[][] { - {"1", 12}, - {" a", 10}, - {" 2", 1}, - {" 3", 1}, - }); - } - - @Test - void testCallTreeMultipleActivationsAsLeafWithExcludedParent() throws Exception { - profilerSetup.close(); - profilerSetup = - ProfilerTestSetup.create( - config -> - config - .clock(nanoClock) - .startScheduledProfiling(false) - .inferredSpansMinDuration(Duration.ofMillis(50))); - profilerSetup.profiler.setProfilingSessionOngoing(true); - // min duration 4 - assertCallTree( - new String[] {" b b c c ", " aa aaa aa ", "1 22 33 1"}, - new Object[][] {{"a", 7}}, - new Object[][] { - {"1", 12}, - {" a", 10}, - {" 2", 1}, - {" 3", 1}, - }); - } - - @Test - void 
testCallTreeMultipleActivationsWithOneChild() throws Exception { - assertCallTree( - new String[] {" bb ", " aa aaa aa aa ", "1 22 3 3 1"}, - new Object[][] { - {"a", 9}, - {" b", 2} - }, - new Object[][] { - {"1", 14}, - {" a", 12}, - {" 2", 1}, - {" 3", 3}, - {" b", 1}, - }); - } - - /* - * [1 ] [1 ] - * [2] -> [a ] - * [a] [2] - * - * Note: this test is currently failing - */ - @Test - @Disabled("fix me") - void testNestedActivationBeforeCallTree() throws Exception { - assertCallTree( - new String[] {" aaa ", "12 2 1"}, - new Object[][] { - {"a", 3}, - }, - new Object[][] { - {"1", 5}, - {" a", 3}, // a is actually a child of the transaction - {" 2", 2}, // 2 is not within the child_ids of a - }); - } - - private void assertCallTree(String[] stackTraces, Object[][] expectedTree) throws Exception { - assertCallTree(stackTraces, expectedTree, null); - } - - private Map assertCallTree( - String[] stackTraces, Object[][] expectedTree, @Nullable Object[][] expectedSpans) - throws Exception { - CallTree.Root root = getCallTree(profilerSetup, stackTraces); - StringBuilder expectedResult = new StringBuilder(); - for (int i = 0; i < expectedTree.length; i++) { - Object[] objects = expectedTree[i]; - expectedResult.append(objects[0]).append(" ").append(objects[1]); - if (i != expectedTree.length - 1) { - expectedResult.append("\n"); - } - } - - String actualResult = root.toString().replace(CallTreeTest.class.getName() + ".", ""); - actualResult = - Arrays.stream(actualResult.split("\n")) - // skip root node - .skip(1) - // trim first two spaces - .map(s -> s.substring(2)) - .collect(Collectors.joining("\n")); - - assertThat(actualResult).isEqualTo(expectedResult.toString()); - - if (expectedSpans != null) { - root.spanify(nanoClock, profilerSetup.sdk.getTracer("dummy-inferred-spans-tracer")); - Map spans = - profilerSetup.getSpans().stream() - .collect(toMap(s -> s.getName().replaceAll(".*#", ""), Function.identity())); - assertThat(profilerSetup.getSpans()).hasSize(expectedSpans.length + 1); - - for (int i = 0; i < expectedSpans.length; i++) { - Object[] expectedSpan = expectedSpans[i]; - String spanName = ((String) expectedSpan[0]).trim(); - long durationMs = (int) expectedSpan[1] * 10; - List stackTrace = - expectedSpan.length == 3 ? 
(List) expectedSpan[2] : Arrays.asList(); - int nestingLevel = getNestingLevel((String) expectedSpan[0]); - String parentName = getParentName(expectedSpans, i, nestingLevel); - if (parentName == null) { - parentName = "Call Tree Root"; - } - assertThat(spans).containsKey(spanName); - assertThat(spans).containsKey(parentName); - SpanData span = spans.get(spanName); - assertThat(isChild(spans.get(parentName), span)) - .withFailMessage( - "Expected %s (%s) to be a child of %s (%s) but was %s (%s)", - spanName, - span.getSpanContext().getSpanId(), - parentName, - spans.get(parentName).getSpanId(), - profilerSetup.getSpans().stream() - .filter(s -> s.getSpanId().equals(span.getParentSpanId())) - .findAny() - .map(SpanData::getName) - .orElse(null), - span.getParentSpanId()) - .isTrue(); - assertThat(isChild(span, spans.get(parentName))) - .withFailMessage( - "Expected %s (%s) to not be a child of %s (%s) but was %s (%s)", - parentName, - spans.get(parentName).getSpanId(), - spanName, - span.getSpanId(), - profilerSetup.getSpans().stream() - .filter(s -> s.getSpanId().equals(span.getParentSpanId())) - .findAny() - .map(SpanData::getName) - .orElse(null), - span.getParentSpanId()) - .isFalse(); - assertThat(span.getEndEpochNanos() - span.getStartEpochNanos()) - .describedAs("Unexpected duration for span %s", span) - .isEqualTo(durationMs * 1_000_000L); - - String actualStacktrace = - span.getAttributes().get(CodeIncubatingAttributes.CODE_STACKTRACE); - if (stackTrace == null || stackTrace.isEmpty()) { - assertThat(actualStacktrace).isBlank(); - } else { - String expected = - stackTrace.stream() - .map( - funcName -> - "at " - + CallTreeTest.class.getName() - + "." - + funcName - + "(CallTreeTest.java)") - .collect(Collectors.joining("\n")); - assertThat(actualStacktrace).isEqualTo(expected); - } - } - return spans; - } - return null; - } - - public boolean isChild(SpanData parent, SpanData expectedChild) { - if (!parent.getTraceId().equals(expectedChild.getTraceId())) { - return false; - } - if (parent.getSpanId().equals(expectedChild.getParentSpanId())) { - return true; - } - for (LinkData link : parent.getLinks()) { - Boolean isChild = link.getAttributes().get(ElasticAttributes.IS_CHILD); - if (isChild != null && isChild) { - SpanContext linkSpanCtx = link.getSpanContext(); - if (linkSpanCtx.getTraceId().equals(expectedChild.getTraceId()) - && linkSpanCtx.getSpanId().equals(expectedChild.getSpanId())) { - return true; - } - } - } - - return false; - } - - @Nullable - private String getParentName(@Nonnull Object[][] expectedSpans, int i, int nestingLevel) { - if (nestingLevel > 0) { - for (int j = i - 1; j >= 0; j--) { - String name = (String) expectedSpans[j][0]; - boolean isParent = getNestingLevel(name) == nestingLevel - 1; - if (isParent) { - return name.trim(); - } - } - } - return null; - } - - private int getNestingLevel(String spanName) { - // nesting is denoted by two spaces - return ((spanName).length() - 1) / 2; - } - - public static CallTree.Root getCallTree(ProfilerTestSetup profilerSetup, String[] stackTraces) - throws Exception { - SamplingProfiler profiler = profilerSetup.profiler; - FixedClock nanoClock = (FixedClock) profilerSetup.profiler.getClock(); - nanoClock.setNanoTime(1); - profiler.setProfilingSessionOngoing(true); - - CallTree.Root root = null; - ObjectPool callTreePool = ObjectPool.createRecyclable(2, CallTree::new); - Map spanMap = new HashMap<>(); - Map spanScopeMap = new HashMap<>(); - - Tracer tracer = profilerSetup.sdk.getTracer("testing-tracer"); - - Span 
transaction = - tracer.spanBuilder("Call Tree Root").setStartTimestamp(1, TimeUnit.NANOSECONDS).startSpan(); - try (Scope scope = transaction.makeCurrent()) { - List stackTraceEvents = new ArrayList<>(); - for (int i = 0; i < stackTraces[0].length(); i++) { - nanoClock.setNanoTime(1 + i * TimeUnit.MILLISECONDS.toNanos(10)); - List trace = new ArrayList<>(); - for (String stackTrace : stackTraces) { - char c = stackTrace.charAt(i); - if (Character.isDigit(c)) { - handleSpanEvent( - tracer, spanMap, spanScopeMap, Character.toString(c), nanoClock.nanoTime()); - break; - } else if (!Character.isSpaceChar(c)) { - trace.add(StackFrame.of(CallTreeTest.class.getName(), Character.toString(c))); - } - } - if (!trace.isEmpty()) { - stackTraceEvents.add(new StackTraceEvent(trace, nanoClock.nanoTime())); - } - } - - profiler.consumeActivationEventsFromRingBufferAndWriteToFile(); - long eof = profiler.startProcessingActivationEventsFile(); - for (StackTraceEvent stackTraceEvent : stackTraceEvents) { - profiler.processActivationEventsUpTo(stackTraceEvent.nanoTime, eof); - if (root == null) { - root = profiler.getRoot(); - assertThat(root).isNotNull(); - } - long millis = profilerSetup.profiler.config.getInferredSpansMinDuration().toMillis(); - root.addStackTrace( - stackTraceEvent.trace, - stackTraceEvent.nanoTime, - callTreePool, - TimeUnit.MILLISECONDS.toNanos(millis)); - } - - } finally { - transaction.end(); - } - - assertThat(root).isNotNull(); - root.end(callTreePool, 0); - return root; - } - - private static class StackTraceEvent { - - private final List trace; - private final long nanoTime; - - public StackTraceEvent(List trace, long nanoTime) { - - this.trace = trace; - this.nanoTime = nanoTime; - } - } - - private static void handleSpanEvent( - Tracer tracer, - Map spanMap, - Map spanScopeMap, - String name, - long nanoTime) { - if (!spanMap.containsKey(name)) { - Span span = - tracer - .spanBuilder(name) - .setParent(Context.current()) - .setStartTimestamp(nanoTime, TimeUnit.NANOSECONDS) - .startSpan(); - spanMap.put(name, span); - spanScopeMap.put(name, span.makeCurrent()); - } else { - spanScopeMap.remove(name).close(); - spanMap.get(name).end(nanoTime, TimeUnit.NANOSECONDS); - } - } -} diff --git a/inferred-spans/src/test/java/co/elastic/otel/profiler/FixedClock.java b/inferred-spans/src/test/java/co/elastic/otel/profiler/FixedClock.java deleted file mode 100644 index 5885feee..00000000 --- a/inferred-spans/src/test/java/co/elastic/otel/profiler/FixedClock.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package co.elastic.otel.profiler; - -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.context.Context; -import io.opentelemetry.sdk.trace.ReadWriteSpan; - -public class FixedClock extends SpanAnchoredClock { - - private long nanoTime = -1L; - - @Override - public void onSpanStart(ReadWriteSpan started, Context parentContext) {} - - @Override - public long nanoTime() { - if (nanoTime == -1L) { - return System.nanoTime(); - } - return nanoTime; - } - - @Override - public long getAnchor(Span parent) { - return 0; - } - - @Override - public long toEpochNanos(long anchor, long recordedNanoTime) { - return recordedNanoTime; - } - - public void setNanoTime(long nanoTime) { - this.nanoTime = nanoTime; - } -} diff --git a/inferred-spans/src/test/java/co/elastic/otel/profiler/ProfilerTestSetup.java b/inferred-spans/src/test/java/co/elastic/otel/profiler/ProfilerTestSetup.java deleted file mode 100644 index 80f66511..00000000 --- a/inferred-spans/src/test/java/co/elastic/otel/profiler/ProfilerTestSetup.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package co.elastic.otel.profiler; - -import io.opentelemetry.sdk.OpenTelemetrySdk; -import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; -import io.opentelemetry.sdk.trace.SdkTracerProvider; -import io.opentelemetry.sdk.trace.data.SpanData; -import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; -import java.util.List; -import java.util.function.Consumer; - -public class ProfilerTestSetup implements AutoCloseable { - - OpenTelemetrySdk sdk; - - SamplingProfiler profiler; - - InMemorySpanExporter spanExporter; - - public ProfilerTestSetup( - OpenTelemetrySdk sdk, InferredSpansProcessor processor, InMemorySpanExporter spanExporter) { - this.sdk = sdk; - this.profiler = processor.profiler; - this.spanExporter = spanExporter; - } - - public List getSpans() { - return spanExporter.getFinishedSpanItems(); - } - - @Override - public void close() { - sdk.close(); - } - - public static ProfilerTestSetup create(Consumer configCustomizer) { - InferredSpansProcessorBuilder builder = InferredSpansConfiguration.builder(); - configCustomizer.accept(builder); - - InferredSpansProcessor processor = builder.build(); - - InMemorySpanExporter exporter = InMemorySpanExporter.create(); - - SdkTracerProvider tracerProvider = - SdkTracerProvider.builder() - .addSpanProcessor(processor) - .addSpanProcessor(SimpleSpanProcessor.create(exporter)) - .build(); - processor.setTracerProvider(tracerProvider); - - OpenTelemetrySdk sdk = OpenTelemetrySdk.builder().setTracerProvider(tracerProvider).build(); - - return new ProfilerTestSetup(sdk, processor, exporter); - } -} diff --git a/inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerQueueTest.java b/inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerQueueTest.java deleted file mode 100644 index 2e8926f1..00000000 --- a/inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerQueueTest.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package co.elastic.otel.profiler; - -import static org.assertj.core.api.Assertions.assertThat; - -import co.elastic.otel.testing.DisabledOnOpenJ9; -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.api.trace.SpanContext; -import io.opentelemetry.api.trace.TraceFlags; -import io.opentelemetry.api.trace.TraceState; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.DisabledOnOs; -import org.junit.jupiter.api.condition.OS; - -public class SamplingProfilerQueueTest { - - @Test - @DisabledOnOs(OS.WINDOWS) - @DisabledOnOpenJ9 - void testFillQueue() throws Exception { - - try (ProfilerTestSetup setup = - ProfilerTestSetup.create( - config -> config.clock(new FixedClock()).startScheduledProfiling(false))) { - - setup.profiler.setProfilingSessionOngoing(true); - - Span traceContext = - Span.wrap( - SpanContext.create( - "0af7651916cd43dd8448eb211c80319c", - "b7ad6b7169203331", - TraceFlags.getSampled(), - TraceState.getDefault())); - - assertThat(setup.profiler.onActivation(traceContext, null)).isTrue(); - - for (int i = 0; i < SamplingProfiler.RING_BUFFER_SIZE - 1; i++) { - assertThat(setup.profiler.onActivation(traceContext, null)).isTrue(); - } - - // no more free slots after adding RING_BUFFER_SIZE events - assertThat(setup.profiler.onActivation(traceContext, null)).isFalse(); - - setup.profiler.consumeActivationEventsFromRingBufferAndWriteToFile(); - - // now there should be free slots - assertThat(setup.profiler.onActivation(traceContext, null)).isTrue(); - } - } -} diff --git a/inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerReplay.java b/inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerReplay.java deleted file mode 100644 index a75dda0f..00000000 --- a/inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerReplay.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package co.elastic.otel.profiler; - -import java.io.File; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.List; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.stream.Collectors; - -/** - * Can be used in combination with the files created by {@link - * ProfilingConfiguration#backupDiagnosticFiles} to replay the creation of profiler-inferred spans. - * This is useful, for example, to troubleshoot why {@link - * co.elastic.apm.agent.impl.transaction.Span#childIds} are set as expected. 
- */ -public class SamplingProfilerReplay { - - private static final Logger logger = Logger.getLogger(SamplingProfilerReplay.class.getName()); - - public static void main(String[] args) throws Exception { - ClassLoader.getSystemClassLoader().setDefaultAssertionStatus(true); - File activationEventsFile = File.createTempFile("activations", ".dat"); - activationEventsFile.deleteOnExit(); - File jfrFile = File.createTempFile("traces", ".jfr"); - jfrFile.deleteOnExit(); - - try (ProfilerTestSetup setup = - ProfilerTestSetup.create( - config -> - config - .startScheduledProfiling(false) - .activationEventsFile(activationEventsFile) - .jfrFile(jfrFile))) { - Path baseDir = Paths.get(System.getProperty("java.io.tmpdir"), "profiler"); - List activationFiles = - Files.list(baseDir) - .filter(p -> p.toString().endsWith("activations.dat")) - .sorted() - .collect(Collectors.toList()); - List traceFiles = - Files.list(baseDir) - .filter(p -> p.toString().endsWith("traces.jfr")) - .sorted() - .collect(Collectors.toList()); - if (traceFiles.size() != activationFiles.size()) { - throw new IllegalStateException(); - } - for (int i = 0; i < activationFiles.size(); i++) { - logger.log( - Level.INFO, - "processing {0} {1}", - new Object[] {activationFiles.get(i), traceFiles.get(i)}); - setup.profiler.copyFromFiles(activationFiles.get(i), traceFiles.get(i)); - setup.profiler.processTraces(); - } - logger.log(Level.INFO, "{0}", setup.getSpans()); - } - } -} diff --git a/inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerTest.java b/inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerTest.java deleted file mode 100644 index bd1a659c..00000000 --- a/inferred-spans/src/test/java/co/elastic/otel/profiler/SamplingProfilerTest.java +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package co.elastic.otel.profiler; - -import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat; -import static org.awaitility.Awaitility.await; - -import co.elastic.otel.common.ElasticAttributes; -import co.elastic.otel.testing.DisabledOnOpenJ9; -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.api.trace.SpanContext; -import io.opentelemetry.api.trace.TraceFlags; -import io.opentelemetry.api.trace.TraceState; -import io.opentelemetry.api.trace.Tracer; -import io.opentelemetry.context.Scope; -import io.opentelemetry.sdk.OpenTelemetrySdk; -import io.opentelemetry.sdk.trace.data.SpanData; -import java.io.IOException; -import java.lang.reflect.Method; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.time.Duration; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; -import java.util.stream.Collectors; -import org.assertj.core.api.Assertions; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.DisabledForJreRange; -import org.junit.jupiter.api.condition.DisabledOnOs; -import org.junit.jupiter.api.condition.JRE; -import org.junit.jupiter.api.condition.OS; - -// async-profiler doesn't work on Windows -@DisabledOnOs(OS.WINDOWS) -@DisabledOnOpenJ9 -class SamplingProfilerTest { - - private ProfilerTestSetup setup; - - @BeforeEach - void setup() { - // avoids any test failure to make other tests to fail - getProfilerTempFiles().forEach(SamplingProfilerTest::silentDeleteFile); - } - - @AfterEach - void tearDown() { - if (setup != null) { - setup.close(); - setup = null; - } - getProfilerTempFiles().forEach(SamplingProfilerTest::silentDeleteFile); - } - - @Test - void shouldLazilyCreateTempFilesAndCleanThem() throws Exception { - - List tempFiles = getProfilerTempFiles(); - assertThat(tempFiles).isEmpty(); - - // temporary files should be created on-demand, and properly deleted afterwards - setupProfiler(false); - - assertThat(setup.profiler.getProfilingSessions()) - .describedAs("profiler should not have any session when disabled") - .isEqualTo(0); - - assertThat(getProfilerTempFiles()) - .describedAs("should not create a temp file when disabled") - .isEmpty(); - - setup.close(); - setup = null; - setupProfiler(true); - - awaitProfilerStarted(setup.profiler); - - assertThat(getProfilerTempFiles()).describedAs("should have created two temp files").hasSize(2); - - setup.close(); - setup = null; - - assertThat(getProfilerTempFiles()) - .describedAs("should delete temp files when profiler is stopped") - .isEmpty(); - } - - private static List getProfilerTempFiles() { - Path tempFolder = Paths.get(System.getProperty("java.io.tmpdir")); - try { - return Files.list(tempFolder) - .filter(f -> f.getFileName().toString().startsWith("apm-")) - .sorted() - .collect(Collectors.toList()); - } catch (IOException e) { - throw new IllegalStateException(e); - } - } - - @Test - void shouldNotDeleteProvidedFiles() throws Exception { - // when an existing file is provided to the profiler, we should not delete it - // unlike the temporary files that are created by profiler itself - - InferredSpansConfiguration defaultConfig; - try (InferredSpansProcessor profiler1 = - InferredSpansProcessor.builder().startScheduledProfiling(false).build()) { - defaultConfig = profiler1.profiler.config; - } - - 
Path tempFile1 = Files.createTempFile("apm-provided", "test.bin"); - Path tempFile2 = Files.createTempFile("apm-provided", "test.jfr"); - - try (OpenTelemetrySdk sdk = OpenTelemetrySdk.builder().build()) { - - SamplingProfiler otherProfiler = - new SamplingProfiler( - defaultConfig, - new FixedClock(), - () -> sdk.getTracer("my-tracer"), - tempFile1.toFile(), - tempFile2.toFile()); - - otherProfiler.start(); - awaitProfilerStarted(otherProfiler); - otherProfiler.stop(); - } - - assertThat(tempFile1).exists(); - assertThat(tempFile2).exists(); - } - - @Test - void testStartCommand() { - setupProfiler(false); - assertThat(setup.profiler.createStartCommand()) - .isEqualTo( - "start,jfr,clock=m,event=wall,cstack=n,interval=5ms,filter,file=null,safemode=0"); - - setup.close(); - setupProfiler(config -> config.startScheduledProfiling(false).profilerLoggingEnabled(false)); - assertThat(setup.profiler.createStartCommand()) - .isEqualTo( - "start,jfr,clock=m,event=wall,cstack=n,interval=5ms,filter,file=null,safemode=0,loglevel=none"); - - setup.close(); - setupProfiler( - config -> - config - .startScheduledProfiling(false) - .profilerLoggingEnabled(false) - .samplingInterval(Duration.ofMillis(10)) - .asyncProfilerSafeMode(14)); - assertThat(setup.profiler.createStartCommand()) - .isEqualTo( - "start,jfr,clock=m,event=wall,cstack=n,interval=10ms,filter,file=null,safemode=14,loglevel=none"); - } - - @Test - void testProfileTransaction() throws Exception { - setupProfiler(true); - awaitProfilerStarted(setup.profiler); - - Tracer tracer = setup.sdk.getTracer("manual-spans"); - - boolean profilingActiveOnThread; - Span tx = tracer.spanBuilder("transaction").startSpan(); - try (Scope scope = tx.makeCurrent()) { - // makes sure that the rest will be captured by another profiling session - // this tests that restoring which threads to profile works - Thread.sleep(600); - profilingActiveOnThread = setup.profiler.isProfilingActiveOnThread(Thread.currentThread()); - aInferred(tracer); - } finally { - tx.end(); - } - - await() - .pollDelay(10, TimeUnit.MILLISECONDS) - .timeout(5000, TimeUnit.MILLISECONDS) - .untilAsserted(() -> assertThat(setup.getSpans()).hasSizeGreaterThanOrEqualTo(6)); - - assertThat(profilingActiveOnThread).isTrue(); - - Optional txData = - setup.getSpans().stream().filter(s -> s.getName().equals("transaction")).findAny(); - assertThat(txData).isPresent(); - assertThat(txData.get()).hasNoParent(); - - Optional testProfileTransaction = - setup.getSpans().stream() - .filter(s -> s.getName().equals("SamplingProfilerTest#testProfileTransaction")) - .findAny(); - assertThat(testProfileTransaction).isPresent(); - assertThat(testProfileTransaction.get()).hasParent(txData.get()); - - Optional inferredSpanA = - setup.getSpans().stream() - .filter(s -> s.getName().equals("SamplingProfilerTest#aInferred")) - .findAny(); - assertThat(inferredSpanA).isPresent(); - assertThat(inferredSpanA.get()).hasParent(testProfileTransaction.get()); - - Optional explicitSpanB = - setup.getSpans().stream().filter(s -> s.getName().equals("bExplicit")).findAny(); - assertThat(explicitSpanB).isPresent(); - assertThat(explicitSpanB.get()).hasParent(txData.get()); - - assertThat(inferredSpanA.get().getLinks()) - .hasSize(1) - .anySatisfy( - link -> { - assertThat(link.getAttributes()).containsEntry("elastic.is_child", true); - SpanData expectedSpan = explicitSpanB.get(); - Assertions.assertThat(link.getSpanContext().getTraceId()) - .isEqualTo(expectedSpan.getTraceId()); - 
Assertions.assertThat(link.getSpanContext().getSpanId()) - .isEqualTo(expectedSpan.getSpanId()); - }); - - Optional inferredSpanC = - setup.getSpans().stream() - .filter(s -> s.getName().equals("SamplingProfilerTest#cInferred")) - .findAny(); - assertThat(inferredSpanC).isPresent(); - assertThat(inferredSpanC.get()).hasParent(explicitSpanB.get()); - - Optional inferredSpanD = - setup.getSpans().stream() - .filter(s -> s.getName().equals("SamplingProfilerTest#dInferred")) - .findAny(); - assertThat(inferredSpanD).isPresent(); - assertThat(inferredSpanD.get()).hasParent(inferredSpanC.get()); - } - - @Test - @DisabledForJreRange(max = JRE.JAVA_20) - void testVirtualThreadsExcluded() throws Exception { - setupProfiler(true); - awaitProfilerStarted(setup.profiler); - Tracer tracer = setup.sdk.getTracer("manual-spans"); - - AtomicReference profilingActive = new AtomicReference<>(); - Runnable task = - () -> { - Span tx = tracer.spanBuilder("transaction").startSpan(); - try (Scope scope = tx.makeCurrent()) { - profilingActive.set(setup.profiler.isProfilingActiveOnThread(Thread.currentThread())); - } finally { - tx.end(); - } - }; - - Method startVirtualThread = Thread.class.getMethod("startVirtualThread", Runnable.class); - Thread virtual = (Thread) startVirtualThread.invoke(null, task); - virtual.join(); - - assertThat(profilingActive.get()).isFalse(); - } - - @Test - void testTransactionWithRemoteParent() throws Exception { - setupProfiler(true); - awaitProfilerStarted(setup.profiler); - - SpanContext dummyParentCtx = - SpanContext.createFromRemoteParent( - "a1a2a3a4a5a6a7a8b1b2b3b4b5b6b7b8", - "c1c2c3c4c5c6c7c8", - TraceFlags.getSampled(), - TraceState.getDefault()); - Span remoteParent = Span.wrap(dummyParentCtx); - try (Scope scope = remoteParent.makeCurrent()) { - // ensure that a remote span activation does not trigger profiling - assertThat(setup.profiler.isProfilingActiveOnThread(Thread.currentThread())).isFalse(); - - Tracer tracer = setup.sdk.getTracer("manual-spans"); - Span localRoot = tracer.spanBuilder("local-root").startSpan(); - try (Scope scope2 = localRoot.makeCurrent()) { - Thread.sleep(500); - } - localRoot.end(); - } - - await() - .pollDelay(10, TimeUnit.MILLISECONDS) - .timeout(5000, TimeUnit.MILLISECONDS) - .untilAsserted(() -> assertThat(setup.getSpans()).hasSizeGreaterThanOrEqualTo(2)); - - List spans = setup.getSpans(); - Optional localRoot = - spans.stream().filter(s -> s.getName().equals("local-root")).findAny(); - assertThat(localRoot).isPresent(); - assertThat(localRoot.get()).hasParentSpanId("c1c2c3c4c5c6c7c8"); - - assertThat(spans) - .anySatisfy( - span -> { - assertThat(span).hasParent(localRoot.get()); - assertThat(span).hasAttribute(ElasticAttributes.IS_INFERRED, true); - }); - } - - @Test - void testPostProcessingDisabled() throws Exception { - setupProfiler(config -> config.postProcessingEnabled(false)); - awaitProfilerStarted(setup.profiler); - Tracer tracer = setup.sdk.getTracer("manual-spans"); - - Span tx = tracer.spanBuilder("transaction").startSpan(); - try (Scope scope = tx.makeCurrent()) { - // makes sure that the rest will be captured by another profiling session - // this tests that restoring which threads to profile works - Thread.sleep(600); - aInferred(tracer); - } finally { - tx.end(); - } - - await() - .pollDelay(10, TimeUnit.MILLISECONDS) - .timeout(5000, TimeUnit.MILLISECONDS) - .untilAsserted(() -> assertThat(setup.getSpans()).hasSize(2)); - - Optional explicitSpanB = - setup.getSpans().stream().filter(s -> 
s.getName().equals("bExplicit")).findAny(); - assertThat(explicitSpanB).isPresent(); - assertThat(explicitSpanB.get()).hasParentSpanId(tx.getSpanContext().getSpanId()); - } - - private void aInferred(Tracer tracer) throws Exception { - Span span = tracer.spanBuilder("bExplicit").startSpan(); - try (Scope spanScope = span.makeCurrent()) { - cInferred(); - } finally { - span.end(); - } - Thread.sleep(50); - } - - private void cInferred() throws Exception { - dInferred(); - Thread.sleep(50); - } - - private void dInferred() throws Exception { - Thread.sleep(50); - } - - private void setupProfiler(boolean enabled) { - setupProfiler(config -> config.startScheduledProfiling(enabled)); - } - - private void setupProfiler(Consumer configCustomizer) { - setup = - ProfilerTestSetup.create( - config -> { - config - .profilingDuration(Duration.ofMillis(500)) - .profilerInterval(Duration.ofMillis(500)) - .samplingInterval(Duration.ofMillis(5)); - configCustomizer.accept(config); - }); - } - - private static void awaitProfilerStarted(SamplingProfiler profiler) { - // ensure profiler is initialized - await() - .pollDelay(10, TimeUnit.MILLISECONDS) - .timeout(6000, TimeUnit.MILLISECONDS) - .until(() -> profiler.getProfilingSessions() > 1); - } - - private static void silentDeleteFile(Path f) { - try { - Files.delete(f); - } catch (IOException e) { - throw new IllegalStateException(e); - } - } -} diff --git a/inferred-spans/src/test/java/co/elastic/otel/profiler/ThreadMatcherTest.java b/inferred-spans/src/test/java/co/elastic/otel/profiler/ThreadMatcherTest.java deleted file mode 100644 index ef0d52a5..00000000 --- a/inferred-spans/src/test/java/co/elastic/otel/profiler/ThreadMatcherTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package co.elastic.otel.profiler; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import org.junit.jupiter.api.Test; - -class ThreadMatcherTest { - - private final ThreadMatcher threadMatcher = new ThreadMatcher(); - - @Test - void testLookup() { - ArrayList threads = new ArrayList<>(); - threadMatcher.forEachThread( - new ThreadMatcher.NonCapturingPredicate() { - @Override - public boolean test(Thread thread, Void state) { - return thread.getId() == Thread.currentThread().getId(); - } - }, - null, - new ThreadMatcher.NonCapturingConsumer>() { - @Override - public void accept(Thread thread, List state) { - state.add(thread); - } - }, - threads); - assertThat(threads).isEqualTo(Arrays.asList(Thread.currentThread())); - } -} diff --git a/inferred-spans/src/test/java/co/elastic/otel/profiler/asyncprofiler/JfrParserTest.java b/inferred-spans/src/test/java/co/elastic/otel/profiler/asyncprofiler/JfrParserTest.java deleted file mode 100644 index 5e9fd1ee..00000000 --- a/inferred-spans/src/test/java/co/elastic/otel/profiler/asyncprofiler/JfrParserTest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package co.elastic.otel.profiler.asyncprofiler; - -import static co.elastic.otel.common.config.WildcardMatcher.caseSensitiveMatcher; -import static org.assertj.core.api.Assertions.assertThat; - -import co.elastic.otel.profiler.StackFrame; -import java.io.File; -import java.nio.ByteBuffer; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Collections; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.jupiter.api.Test; - -class JfrParserTest { - - private static final int MAX_STACK_DEPTH = 4; - - @Test - void name() throws Exception { - // Using a small buffer, but big enough to fit the largest string in the JFR file to test edge - // cases - JfrParser jfrParser = new JfrParser(ByteBuffer.allocate(368), ByteBuffer.allocate(368)); - - File file = - Paths.get(JfrParserTest.class.getClassLoader().getResource("recording.jfr").toURI()) - .toFile(); - - jfrParser.parse( - file, - Collections.emptyList(), - Collections.singletonList(caseSensitiveMatcher("co.elastic.otel.*"))); - AtomicInteger stackTraces = new AtomicInteger(); - ArrayList stackFrames = new ArrayList<>(); - jfrParser.consumeStackTraces( - (threadId, stackTraceId, nanoTime) -> { - jfrParser.resolveStackTrace(stackTraceId, stackFrames, MAX_STACK_DEPTH); - if (!stackFrames.isEmpty()) { - stackTraces.incrementAndGet(); - assertThat(stackFrames.get(stackFrames.size() - 1).getMethodName()) - .isEqualTo("testProfileTransaction"); - assertThat(stackFrames).hasSizeLessThanOrEqualTo(MAX_STACK_DEPTH); - } - stackFrames.clear(); - }); - assertThat(stackTraces.get()).isEqualTo(98); - } -} diff --git a/licenses/more-licences.md b/licenses/more-licences.md index aec18504..cd4506ad 100644 --- a/licenses/more-licences.md +++ b/licenses/more-licences.md @@ -20,7 +20,7 @@ **4** **Group:** `com.lmax` **Name:** `disruptor` **Version:** `3.4.4` > - **Project URL**: [http://lmax-exchange.github.com/disruptor](http://lmax-exchange.github.com/disruptor) -> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) +> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) **5** **Group:** `io.opentelemetry` **Name:** `opentelemetry-api` **Version:** `1.40.0` > - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java) @@ -58,61 +58,69 @@ > - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java) > - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) -**14** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-resource-providers` **Version:** `1.36.0-alpha` +**14** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-inferred-spans` **Version:** `1.37.0-alpha` +> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-contrib](https://github.com/open-telemetry/opentelemetry-java-contrib) +> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) + +**15** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-resource-providers` **Version:** `1.36.0-alpha` > - **POM Project URL**: 
[https://github.com/open-telemetry/opentelemetry-java-contrib](https://github.com/open-telemetry/opentelemetry-java-contrib) > - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) -**15** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-span-stacktrace` **Version:** `1.36.0-alpha` +**16** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-span-stacktrace` **Version:** `1.36.0-alpha` > - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-contrib](https://github.com/open-telemetry/opentelemetry-java-contrib) > - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) -**16** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-extension-api` **Version:** `2.6.0-alpha` +**17** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-extension-api` **Version:** `2.6.0-alpha` > - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation) > - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) -**17** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-tooling` **Version:** `2.6.0-alpha` +**18** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-tooling` **Version:** `2.6.0-alpha` > - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation) > - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) -**18** **Group:** `io.opentelemetry.semconv` **Name:** `opentelemetry-semconv` **Version:** `1.25.0-alpha` +**19** **Group:** `io.opentelemetry.semconv` **Name:** `opentelemetry-semconv` **Version:** `1.25.0-alpha` > - **POM Project URL**: [https://github.com/open-telemetry/semantic-conventions-java](https://github.com/open-telemetry/semantic-conventions-java) > - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) -**19** **Group:** `io.opentelemetry.semconv` **Name:** `opentelemetry-semconv-incubating` **Version:** `1.25.0-alpha` +**20** **Group:** `io.opentelemetry.semconv` **Name:** `opentelemetry-semconv-incubating` **Version:** `1.25.0-alpha` > - **POM Project URL**: [https://github.com/open-telemetry/semantic-conventions-java](https://github.com/open-telemetry/semantic-conventions-java) > - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) -**20** **Group:** `net.bytebuddy` **Name:** `byte-buddy-dep` **Version:** `1.14.18` +**21** **Group:** `net.bytebuddy` **Name:** `byte-buddy-dep` **Version:** `1.14.18` > - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) > - **Embedded license files**: [byte-buddy-dep-1.14.18.jar/META-INF/LICENSE](byte-buddy-dep-1.14.18.jar/META-INF/LICENSE) - [byte-buddy-dep-1.14.18.jar/META-INF/NOTICE](byte-buddy-dep-1.14.18.jar/META-INF/NOTICE) -**21** **Group:** `org.jctools` **Name:** `jctools-core` **Version:** `4.0.5` +**22** **Group:** 
`org.agrona` **Name:** `agrona` **Version:** `1.21.2` +> - **POM Project URL**: [https://github.com/real-logic/agrona](https://github.com/real-logic/agrona) +> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) + +**23** **Group:** `org.jctools` **Name:** `jctools-core` **Version:** `4.0.5` > - **Manifest License**: Apache License, Version 2.0 (Not Packaged) > - **POM Project URL**: [https://github.com/JCTools](https://github.com/JCTools) -> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) +> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) -**22** **Group:** `org.ow2.asm` **Name:** `asm` **Version:** `9.7` +**24** **Group:** `org.ow2.asm` **Name:** `asm` **Version:** `9.7` > - **Manifest Project URL**: [http://asm.ow2.org](http://asm.ow2.org) > - **Manifest License**: The 3-Clause BSD License (Not Packaged) > - **POM Project URL**: [http://asm.ow2.io/](http://asm.ow2.io/) > - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) > - **POM License**: The 3-Clause BSD License - [https://opensource.org/licenses/BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause) -**23** **Group:** `org.ow2.asm` **Name:** `asm-commons` **Version:** `9.7` +**25** **Group:** `org.ow2.asm` **Name:** `asm-commons` **Version:** `9.7` > - **Manifest Project URL**: [http://asm.ow2.org](http://asm.ow2.org) > - **Manifest License**: The 3-Clause BSD License (Not Packaged) > - **POM Project URL**: [http://asm.ow2.io/](http://asm.ow2.io/) > - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) > - **POM License**: The 3-Clause BSD License - [https://opensource.org/licenses/BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause) -**24** **Group:** `tools.profiler` **Name:** `async-profiler` **Version:** `3.0` +**26** **Group:** `tools.profiler` **Name:** `async-profiler` **Version:** `3.0` > - **POM Project URL**: [https://profiler.tools](https://profiler.tools) > - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) ## Creative Commons Legal Code -**25** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2` +**27** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2` > - **Manifest License**: The 2-Clause BSD License (Not Packaged) > - **POM Project URL**: [http://hdrhistogram.github.io/HdrHistogram/](http://hdrhistogram.github.io/HdrHistogram/) > - **POM License**: Creative Commons Legal Code - [https://creativecommons.org/publicdomain/zero/1.0/legalcode](https://creativecommons.org/publicdomain/zero/1.0/legalcode) @@ -122,7 +130,7 @@ ## PUBLIC DOMAIN -**26** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2` +**28** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2` > - **Manifest License**: The 2-Clause BSD License (Not Packaged) > - **POM Project URL**: [http://hdrhistogram.github.io/HdrHistogram/](http://hdrhistogram.github.io/HdrHistogram/) > - **POM License**: Creative Commons Legal Code - 
[https://creativecommons.org/publicdomain/zero/1.0/legalcode](https://creativecommons.org/publicdomain/zero/1.0/legalcode) @@ -132,7 +140,7 @@ ## The 2-Clause BSD License -**27** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2` +**29** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2` > - **Manifest License**: The 2-Clause BSD License (Not Packaged) > - **POM Project URL**: [http://hdrhistogram.github.io/HdrHistogram/](http://hdrhistogram.github.io/HdrHistogram/) > - **POM License**: Creative Commons Legal Code - [https://creativecommons.org/publicdomain/zero/1.0/legalcode](https://creativecommons.org/publicdomain/zero/1.0/legalcode) @@ -142,14 +150,14 @@ ## The 3-Clause BSD License -**28** **Group:** `org.ow2.asm` **Name:** `asm` **Version:** `9.7` +**30** **Group:** `org.ow2.asm` **Name:** `asm` **Version:** `9.7` > - **Manifest Project URL**: [http://asm.ow2.org](http://asm.ow2.org) > - **Manifest License**: The 3-Clause BSD License (Not Packaged) > - **POM Project URL**: [http://asm.ow2.io/](http://asm.ow2.io/) > - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) > - **POM License**: The 3-Clause BSD License - [https://opensource.org/licenses/BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause) -**29** **Group:** `org.ow2.asm` **Name:** `asm-commons` **Version:** `9.7` +**31** **Group:** `org.ow2.asm` **Name:** `asm-commons` **Version:** `9.7` > - **Manifest Project URL**: [http://asm.ow2.org](http://asm.ow2.org) > - **Manifest License**: The 3-Clause BSD License (Not Packaged) > - **POM Project URL**: [http://asm.ow2.io/](http://asm.ow2.io/) diff --git a/testing/integration-tests/inferred-spans-test/build.gradle.kts b/testing/integration-tests/inferred-spans-test/build.gradle.kts index 773ed94b..b68ad6a4 100644 --- a/testing/integration-tests/inferred-spans-test/build.gradle.kts +++ b/testing/integration-tests/inferred-spans-test/build.gradle.kts @@ -16,9 +16,9 @@ tasks.withType() { jvmArgs( //"-Dotel.javaagent.debug=true", "-Dotel.service.name=testing", - "-Delastic.otel.inferred.spans.enabled=true", - "-Delastic.otel.inferred.spans.duration=2000ms", - "-Delastic.otel.inferred.spans.interval=2000ms", - "-Delastic.otel.inferred.spans.sampling.interval=5ms" + "-Dotel.inferred.spans.enabled=true", + "-Dotel.inferred.spans.duration=2000ms", + "-Dotel.inferred.spans.interval=2000ms", + "-Dotel.inferred.spans.sampling.interval=5ms" ) } From 8aa033c70ff5accfba1e108b2c9bb4f7f334e383 Mon Sep 17 00:00:00 2001 From: Jonas Kunz Date: Mon, 19 Aug 2024 10:19:13 +0200 Subject: [PATCH 2/3] move dependency to catalog --- gradle/libs.versions.toml | 1 + inferred-spans/build.gradle.kts | 3 +- .../META-INF/LICENSE | 0 .../META-INF/NOTICE | 0 licenses/more-licences.md | 32 +++++++++---------- 5 files changed, 18 insertions(+), 18 deletions(-) rename licenses/{byte-buddy-dep-1.14.18.jar => byte-buddy-dep-1.14.19.jar}/META-INF/LICENSE (100%) rename licenses/{byte-buddy-dep-1.14.18.jar => byte-buddy-dep-1.14.19.jar}/META-INF/NOTICE (100%) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index db217a39..e6c260fa 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -33,6 +33,7 @@ awsContribResources = { group = "io.opentelemetry.contrib", name = "opentelemetr gcpContribResources = { group = "io.opentelemetry.contrib", name = "opentelemetry-gcp-resources", version.ref = "opentelemetryContribAlpha" } contribResources = { group = 
"io.opentelemetry.contrib", name = "opentelemetry-resource-providers", version.ref = "opentelemetryContribAlpha" } contribSpanStacktrace = { group = "io.opentelemetry.contrib", name = "opentelemetry-span-stacktrace", version.ref = "opentelemetryContribAlpha" } +contribInferredSpans = { group = "io.opentelemetry.contrib", name = "opentelemetry-inferred-spans", version.ref = "opentelemetryContribAlpha" } opentelemetrySemconv = { group = "io.opentelemetry.semconv", name = "opentelemetry-semconv", version.ref = "opentelemetrySemconvAlpha"} opentelemetrySemconvIncubating = { group = "io.opentelemetry.semconv", name = "opentelemetry-semconv-incubating", version.ref = "opentelemetrySemconvAlpha"} diff --git a/inferred-spans/build.gradle.kts b/inferred-spans/build.gradle.kts index b6b28653..9207b15e 100644 --- a/inferred-spans/build.gradle.kts +++ b/inferred-spans/build.gradle.kts @@ -8,8 +8,7 @@ description = "Elastic Inferred Spans extension for OpenTelemetry Java" dependencies { compileOnly("io.opentelemetry:opentelemetry-sdk") compileOnly("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure-spi") - implementation("io.opentelemetry.contrib:opentelemetry-inferred-spans") - implementation(project(":common")) + implementation(libs.contribInferredSpans) testImplementation(project(":testing-common")) testImplementation("io.opentelemetry:opentelemetry-sdk") diff --git a/licenses/byte-buddy-dep-1.14.18.jar/META-INF/LICENSE b/licenses/byte-buddy-dep-1.14.19.jar/META-INF/LICENSE similarity index 100% rename from licenses/byte-buddy-dep-1.14.18.jar/META-INF/LICENSE rename to licenses/byte-buddy-dep-1.14.19.jar/META-INF/LICENSE diff --git a/licenses/byte-buddy-dep-1.14.18.jar/META-INF/NOTICE b/licenses/byte-buddy-dep-1.14.19.jar/META-INF/NOTICE similarity index 100% rename from licenses/byte-buddy-dep-1.14.18.jar/META-INF/NOTICE rename to licenses/byte-buddy-dep-1.14.19.jar/META-INF/NOTICE diff --git a/licenses/more-licences.md b/licenses/more-licences.md index cd4506ad..59c28c9e 100644 --- a/licenses/more-licences.md +++ b/licenses/more-licences.md @@ -22,39 +22,39 @@ > - **Project URL**: [http://lmax-exchange.github.com/disruptor](http://lmax-exchange.github.com/disruptor) > - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) -**5** **Group:** `io.opentelemetry` **Name:** `opentelemetry-api` **Version:** `1.40.0` +**5** **Group:** `io.opentelemetry` **Name:** `opentelemetry-api` **Version:** `1.41.0` > - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java) > - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) -**6** **Group:** `io.opentelemetry` **Name:** `opentelemetry-context` **Version:** `1.40.0` +**6** **Group:** `io.opentelemetry` **Name:** `opentelemetry-context` **Version:** `1.41.0` > - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java) > - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) -**7** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk` **Version:** `1.40.0` +**7** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk` **Version:** `1.41.0` > - **POM Project URL**: 
[https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**8** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-common` **Version:** `1.40.0`
+**8** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-common` **Version:** `1.41.0`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**9** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-extension-autoconfigure` **Version:** `1.40.0`
+**9** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-extension-autoconfigure` **Version:** `1.41.0`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**10** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-extension-autoconfigure-spi` **Version:** `1.40.0`
+**10** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-extension-autoconfigure-spi` **Version:** `1.41.0`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**11** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-logs` **Version:** `1.40.0`
+**11** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-logs` **Version:** `1.41.0`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**12** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-metrics` **Version:** `1.40.0`
+**12** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-metrics` **Version:** `1.41.0`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**13** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-trace` **Version:** `1.40.0`
+**13** **Group:** `io.opentelemetry` **Name:** `opentelemetry-sdk-trace` **Version:** `1.41.0`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

@@ -62,19 +62,19 @@
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-contrib](https://github.com/open-telemetry/opentelemetry-java-contrib)
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)

-**15** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-resource-providers` **Version:** `1.36.0-alpha`
+**15** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-resource-providers` **Version:** `1.37.0-alpha`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-contrib](https://github.com/open-telemetry/opentelemetry-java-contrib)
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)

-**16** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-span-stacktrace` **Version:** `1.36.0-alpha`
+**16** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-span-stacktrace` **Version:** `1.37.0-alpha`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-contrib](https://github.com/open-telemetry/opentelemetry-java-contrib)
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)

-**17** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-extension-api` **Version:** `2.6.0-alpha`
+**17** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-extension-api` **Version:** `2.7.0-alpha`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**18** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-tooling` **Version:** `2.6.0-alpha`
+**18** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-tooling` **Version:** `2.7.0-alpha`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

@@ -86,10 +86,10 @@
> - **POM Project URL**: [https://github.com/open-telemetry/semantic-conventions-java](https://github.com/open-telemetry/semantic-conventions-java)
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)

-**21** **Group:** `net.bytebuddy` **Name:** `byte-buddy-dep` **Version:** `1.14.18`
+**21** **Group:** `net.bytebuddy` **Name:** `byte-buddy-dep` **Version:** `1.14.19`
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
-> - **Embedded license files**: [byte-buddy-dep-1.14.18.jar/META-INF/LICENSE](byte-buddy-dep-1.14.18.jar/META-INF/LICENSE)
- - [byte-buddy-dep-1.14.18.jar/META-INF/NOTICE](byte-buddy-dep-1.14.18.jar/META-INF/NOTICE)
+> - **Embedded license files**: [byte-buddy-dep-1.14.19.jar/META-INF/LICENSE](byte-buddy-dep-1.14.19.jar/META-INF/LICENSE)
+ - [byte-buddy-dep-1.14.19.jar/META-INF/NOTICE](byte-buddy-dep-1.14.19.jar/META-INF/NOTICE)

**22** **Group:** `org.agrona` **Name:** `agrona` **Version:** `1.21.2`
> - **POM Project URL**: [https://github.com/real-logic/agrona](https://github.com/real-logic/agrona)

From e9ef0090c7a0f4fec50ec5170f5528fd149a097c Mon Sep 17 00:00:00 2001
From: Jonas Kunz
Date: Mon, 2 Sep 2024 16:35:45 +0200
Subject: [PATCH 3/3] fix merge conflicts

---
 ... => InferredSpansConfigMigrationTest.java} | 2 +-
 licenses/more-licences.md | 44 +++++++++++--------
 2 files changed, 27 insertions(+), 19 deletions(-)
 rename inferred-spans/src/test/java/co/elastic/otel/{InferredSpansAutoConfigTest.java => InferredSpansConfigMigrationTest.java} (99%)

diff --git a/inferred-spans/src/test/java/co/elastic/otel/InferredSpansAutoConfigTest.java b/inferred-spans/src/test/java/co/elastic/otel/InferredSpansConfigMigrationTest.java
similarity index 99%
rename from inferred-spans/src/test/java/co/elastic/otel/InferredSpansAutoConfigTest.java
rename to inferred-spans/src/test/java/co/elastic/otel/InferredSpansConfigMigrationTest.java
index 94120e78..8adda1f9 100644
--- a/inferred-spans/src/test/java/co/elastic/otel/InferredSpansAutoConfigTest.java
+++ b/inferred-spans/src/test/java/co/elastic/otel/InferredSpansConfigMigrationTest.java
@@ -41,7 +41,7 @@
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

-public class InferredSpansAutoConfigTest {
+public class InferredSpansConfigMigrationTest {

  @BeforeEach
  @AfterEach
diff --git a/licenses/more-licences.md b/licenses/more-licences.md
index 2397a0ca..59c28c9e 100644
--- a/licenses/more-licences.md
+++ b/licenses/more-licences.md
@@ -20,7 +20,7 @@

**4** **Group:** `com.lmax` **Name:** `disruptor` **Version:** `3.4.4`
> - **Project URL**: [http://lmax-exchange.github.com/disruptor](http://lmax-exchange.github.com/disruptor)
-> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

**5** **Group:** `io.opentelemetry` **Name:** `opentelemetry-api` **Version:** `1.41.0`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java)
@@ -58,61 +58,69 @@
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java](https://github.com/open-telemetry/opentelemetry-java)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**14** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-resource-providers` **Version:** `1.37.0-alpha`
+**14** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-inferred-spans` **Version:** `1.37.0-alpha`
+> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-contrib](https://github.com/open-telemetry/opentelemetry-java-contrib)
+> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+
+**15** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-resource-providers` **Version:** `1.37.0-alpha`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-contrib](https://github.com/open-telemetry/opentelemetry-java-contrib)
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)

-**15** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-span-stacktrace` **Version:** `1.37.0-alpha`
+**16** **Group:** `io.opentelemetry.contrib` **Name:** `opentelemetry-span-stacktrace` **Version:** `1.37.0-alpha`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-contrib](https://github.com/open-telemetry/opentelemetry-java-contrib)
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)

-**16** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-extension-api` **Version:** `2.7.0-alpha`
+**17** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-extension-api` **Version:** `2.7.0-alpha`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**17** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-tooling` **Version:** `2.7.0-alpha`
+**18** **Group:** `io.opentelemetry.javaagent` **Name:** `opentelemetry-javaagent-tooling` **Version:** `2.7.0-alpha`
> - **POM Project URL**: [https://github.com/open-telemetry/opentelemetry-java-instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**18** **Group:** `io.opentelemetry.semconv` **Name:** `opentelemetry-semconv` **Version:** `1.25.0-alpha`
+**19** **Group:** `io.opentelemetry.semconv` **Name:** `opentelemetry-semconv` **Version:** `1.25.0-alpha`
> - **POM Project URL**: [https://github.com/open-telemetry/semantic-conventions-java](https://github.com/open-telemetry/semantic-conventions-java)
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)

-**19** **Group:** `io.opentelemetry.semconv` **Name:** `opentelemetry-semconv-incubating` **Version:** `1.25.0-alpha`
+**20** **Group:** `io.opentelemetry.semconv` **Name:** `opentelemetry-semconv-incubating` **Version:** `1.25.0-alpha`
> - **POM Project URL**: [https://github.com/open-telemetry/semantic-conventions-java](https://github.com/open-telemetry/semantic-conventions-java)
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)

-**20** **Group:** `net.bytebuddy` **Name:** `byte-buddy-dep` **Version:** `1.14.19`
+**21** **Group:** `net.bytebuddy` **Name:** `byte-buddy-dep` **Version:** `1.14.19`
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
> - **Embedded license files**: [byte-buddy-dep-1.14.19.jar/META-INF/LICENSE](byte-buddy-dep-1.14.19.jar/META-INF/LICENSE)
 - [byte-buddy-dep-1.14.19.jar/META-INF/NOTICE](byte-buddy-dep-1.14.19.jar/META-INF/NOTICE)

-**21** **Group:** `org.jctools` **Name:** `jctools-core` **Version:** `4.0.5`
+**22** **Group:** `org.agrona` **Name:** `agrona` **Version:** `1.21.2`
+> - **POM Project URL**: [https://github.com/real-logic/agrona](https://github.com/real-logic/agrona)
+> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+
+**23** **Group:** `org.jctools` **Name:** `jctools-core` **Version:** `4.0.5`
> - **Manifest License**: Apache License, Version 2.0 (Not Packaged)
> - **POM Project URL**: [https://github.com/JCTools](https://github.com/JCTools)
-> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)

-**22** **Group:** `org.ow2.asm` **Name:** `asm` **Version:** `9.7`
+**24** **Group:** `org.ow2.asm` **Name:** `asm` **Version:** `9.7`
> - **Manifest Project URL**: [http://asm.ow2.org](http://asm.ow2.org)
> - **Manifest License**: The 3-Clause BSD License (Not Packaged)
> - **POM Project URL**: [http://asm.ow2.io/](http://asm.ow2.io/)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
> - **POM License**: The 3-Clause BSD License - [https://opensource.org/licenses/BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)

-**23** **Group:** `org.ow2.asm` **Name:** `asm-commons` **Version:** `9.7`
+**25** **Group:** `org.ow2.asm` **Name:** `asm-commons` **Version:** `9.7`
> - **Manifest Project URL**: [http://asm.ow2.org](http://asm.ow2.org)
> - **Manifest License**: The 3-Clause BSD License (Not Packaged)
> - **POM Project URL**: [http://asm.ow2.io/](http://asm.ow2.io/)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
> - **POM License**: The 3-Clause BSD License - [https://opensource.org/licenses/BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)

-**24** **Group:** `tools.profiler` **Name:** `async-profiler` **Version:** `3.0`
+**26** **Group:** `tools.profiler` **Name:** `async-profiler` **Version:** `3.0`
> - **POM Project URL**: [https://profiler.tools](https://profiler.tools)
> - **POM License**: Apache License, Version 2.0 - [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)

## Creative Commons Legal Code

-**25** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2`
+**27** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2`
> - **Manifest License**: The 2-Clause BSD License (Not Packaged)
> - **POM Project URL**: [http://hdrhistogram.github.io/HdrHistogram/](http://hdrhistogram.github.io/HdrHistogram/)
> - **POM License**: Creative Commons Legal Code - [https://creativecommons.org/publicdomain/zero/1.0/legalcode](https://creativecommons.org/publicdomain/zero/1.0/legalcode)
@@ -122,7 +130,7 @@

## PUBLIC DOMAIN

-**26** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2`
+**28** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2`
> - **Manifest License**: The 2-Clause BSD License (Not Packaged)
> - **POM Project URL**: [http://hdrhistogram.github.io/HdrHistogram/](http://hdrhistogram.github.io/HdrHistogram/)
> - **POM License**: Creative Commons Legal Code - [https://creativecommons.org/publicdomain/zero/1.0/legalcode](https://creativecommons.org/publicdomain/zero/1.0/legalcode)
@@ -132,7 +140,7 @@

## The 2-Clause BSD License

-**27** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2`
+**29** **Group:** `org.hdrhistogram` **Name:** `HdrHistogram` **Version:** `2.2.2`
> - **Manifest License**: The 2-Clause BSD License (Not Packaged)
> - **POM Project URL**: [http://hdrhistogram.github.io/HdrHistogram/](http://hdrhistogram.github.io/HdrHistogram/)
> - **POM License**: Creative Commons Legal Code - [https://creativecommons.org/publicdomain/zero/1.0/legalcode](https://creativecommons.org/publicdomain/zero/1.0/legalcode)
@@ -142,14 +150,14 @@

## The 3-Clause BSD License

-**28** **Group:** `org.ow2.asm` **Name:** `asm` **Version:** `9.7`
+**30** **Group:** `org.ow2.asm` **Name:** `asm` **Version:** `9.7`
> - **Manifest Project URL**: [http://asm.ow2.org](http://asm.ow2.org)
> - **Manifest License**: The 3-Clause BSD License (Not Packaged)
> - **POM Project URL**: [http://asm.ow2.io/](http://asm.ow2.io/)
> - **POM License**: Apache License, Version 2.0 - [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
> - **POM License**: The 3-Clause BSD License - [https://opensource.org/licenses/BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)

-**29** **Group:** `org.ow2.asm` **Name:** `asm-commons` **Version:** `9.7`
+**31** **Group:** `org.ow2.asm` **Name:** `asm-commons` **Version:** `9.7`
> - **Manifest Project URL**: [http://asm.ow2.org](http://asm.ow2.org)
> - **Manifest License**: The 3-Clause BSD License (Not Packaged)
> - **POM Project URL**: [http://asm.ow2.io/](http://asm.ow2.io/)