From 03a3886a6d6d625cb6a09b59ea782454c60728dc Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 12 Sep 2024 16:43:52 +0200 Subject: [PATCH 01/29] add avg_over_time to TraceQL metrics --- docs/sources/tempo/traceql/metrics-queries.md | 15 +- pkg/traceql/ast.go | 7 + pkg/traceql/engine_metrics.go | 126 ++- pkg/traceql/engine_metrics_test.go | 141 ++++ pkg/traceql/enum_aggregates.go | 3 + pkg/traceql/expr.y | 4 +- pkg/traceql/expr.y.go | 761 +++++++++--------- pkg/traceql/lexer.go | 1 + pkg/traceql/parse_test.go | 12 + pkg/traceql/test_examples.yaml | 2 + 10 files changed, 677 insertions(+), 395 deletions(-) diff --git a/docs/sources/tempo/traceql/metrics-queries.md b/docs/sources/tempo/traceql/metrics-queries.md index 086a5207b13..52b44ecdeec 100644 --- a/docs/sources/tempo/traceql/metrics-queries.md +++ b/docs/sources/tempo/traceql/metrics-queries.md @@ -62,6 +62,9 @@ These functions can be added as an operator at the end of any TraceQL query. `max_over_time` : Returns the maximum value of matching spans values per time interval (see the `step` API parameter) +`avg_over_time` +: Returns the average value of matching spans values per time interval (see the `step` API parameter) + `quantile_over_time` : The quantile of the values in the specified interval @@ -97,7 +100,7 @@ down by HTTP route. This might let you determine that `/api/sad` had a higher rate of erroring spans than `/api/happy`, for example. -### The `count_over_time`, `min_over_time` and `max_over_time` functions +### The `count_over_time`, `min_over_time`, `max_over_time` and `avg_over_time` functions The `count_over_time()` let you counts the number of matching spans per time interval. @@ -128,6 +131,16 @@ The `max_over_time()` let you aggregate numerical values by computing the maximu { name = "GET /:endpoint" } | max_over_time(span.http.status_code) ``` +The `avg_over_time()` let you aggregate numerical values by computing the average value of them, such as the all important span duration. 
+ +``` +{ name = "GET /:endpoint" } | avg_over_time(duration) by (span.http.target) +``` + +``` +{ name = "GET /:endpoint" } | avg_over_time(event:cpu_seconds_tota) +``` + ### The `quantile_over_time` and `histogram_over_time` functions The `quantile_over_time()` and `histogram_over_time()` functions let you aggregate numerical values, such as the all important span duration. diff --git a/pkg/traceql/ast.go b/pkg/traceql/ast.go index 67d6770ff14..065c05ccdc7 100644 --- a/pkg/traceql/ast.go +++ b/pkg/traceql/ast.go @@ -1121,6 +1121,12 @@ func (a *MetricsAggregate) init(q *tempopb.QueryRangeRequest, mode AggregateMode exemplarFn = func(s Span) (float64, uint64) { return math.NaN(), a.spanStartTimeMs(s) } + case metricsAggregateAvgOverTime: + innerAgg = func() VectorAggregator { return NewOverTimeAggregator(a.attr, avgAggregation) } + a.simpleAggregationOp = avgAggregation + exemplarFn = func(s Span) (float64, uint64) { + return math.NaN(), a.spanStartTimeMs(s) + } case metricsAggregateRate: innerAgg = func() VectorAggregator { return NewRateAggregator(1.0 / time.Duration(q.Step).Seconds()) } a.simpleAggregationOp = sumAggregation @@ -1247,6 +1253,7 @@ func (a *MetricsAggregate) validate() error { case metricsAggregateCountOverTime: case metricsAggregateMinOverTime: case metricsAggregateMaxOverTime: + case metricsAggregateAvgOverTime: case metricsAggregateRate: case metricsAggregateHistogramOverTime: if len(a.by) >= maxGroupBys { diff --git a/pkg/traceql/engine_metrics.go b/pkg/traceql/engine_metrics.go index 7c711fe8fbf..39e34fd33ec 100644 --- a/pkg/traceql/engine_metrics.go +++ b/pkg/traceql/engine_metrics.go @@ -345,30 +345,39 @@ func (c *CountOverTimeAggregator) Sample() float64 { // calculate the rate when given a multiplier. 
type OverTimeAggregator struct { getSpanAttValue func(s Span) float64 - agg func(current, new float64) float64 + agg func(current *OverTimeAggregator, new float64) val float64 + count float64 // number of spans + + // Only for computing the average + c float64 // compesation for Kahan summation } var _ VectorAggregator = (*OverTimeAggregator)(nil) func NewOverTimeAggregator(attr Attribute, op SimpleAggregationOp) *OverTimeAggregator { var fn func(s Span) float64 - var agg func(current, new float64) float64 + var agg func(current *OverTimeAggregator, new float64) switch op { case maxAggregation: - agg = func(current, new float64) float64 { - if math.IsNaN(current) || new > current { - return new + agg = func(current *OverTimeAggregator, new float64) { + if math.IsNaN(current.val) || new > current.val { + current.val = new } - return current } case minAggregation: - agg = func(current, new float64) float64 { - if math.IsNaN(current) || new < current { - return new + agg = func(current *OverTimeAggregator, new float64) { + if math.IsNaN(current.val) || new < current.val { + current.val = new } - return current + } + case avgAggregation: + agg = func(current *OverTimeAggregator, inc float64) { + current.count++ + mean, c := averageInc(current.val, inc, current.count, current.c) + current.c = c + current.val = mean } } @@ -395,13 +404,54 @@ func NewOverTimeAggregator(attr Attribute, op SimpleAggregationOp) *OverTimeAggr } func (c *OverTimeAggregator) Observe(s Span) { - c.val = c.agg(c.val, c.getSpanAttValue(s)) + c.agg(c, c.getSpanAttValue(s)) } func (c *OverTimeAggregator) Sample() float64 { return c.val } +func averageInc(mean, inc, count, compensation float64) (float64, float64) { + if math.IsNaN(mean) && !math.IsNaN(inc) { + // When we have a proper value in the span we need to initialize to 0 + mean = 0 + } + if math.IsInf(mean, 0) { + if math.IsInf(inc, 0) && (mean > 0) == (inc > 0) { + // The `current.val` and `new` values are `Inf` of the same sign. 
They + // can't be subtracted, but the value of `current.val` is correct + // already. + return mean, compensation + } + if !math.IsInf(inc, 0) && !math.IsNaN(inc) { + // At this stage, the current.val is an infinite. If the added + // value is neither an Inf or a Nan, we can keep that mean + // value. + // This is required because our calculation below removes + // the mean value, which would look like Inf += x - Inf and + // end up as a NaN. + return mean, compensation + } + } + mean, c := kahanSumInc(inc/count-mean/count, mean, compensation) + return mean, c +} + +func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { + t := sum + inc + switch { + case math.IsInf(t, 0): + c = 0 + + // Using Neumaier improvement, swap if next term larger than sum. + case math.Abs(sum) >= math.Abs(inc): + c += (sum - t) + inc + default: + c += (inc - t) + sum + } + return t, c +} + // StepAggregator sorts spans into time slots using a step interval like 30s or 1m type StepAggregator struct { start, end, step uint64 @@ -1096,43 +1146,67 @@ const ( sumAggregation SimpleAggregationOp = iota minAggregation maxAggregation + avgAggregation ) type SimpleAggregator struct { ss SeriesSet exemplarBuckets *bucketSet len int - aggregationFunc func(existingValue float64, newValue float64) float64 + aggregationFunc func(b *SimpleAggregator, serie string, pos int, newValue float64) start, end, step uint64 initWithNaN bool + + // Only for average + initAvg bool + ssCounter map[string]map[int]float64 // Counter of processed elements to calculate median + ssCompensation map[string]map[int]float64 // Avg compensation } func NewSimpleCombiner(req *tempopb.QueryRangeRequest, op SimpleAggregationOp) *SimpleAggregator { l := IntervalCount(req.Start, req.End, req.Step) var initWithNaN bool - var f func(existingValue float64, newValue float64) float64 + var f func(b *SimpleAggregator, serie string, pos int, newValue float64) switch op { case minAggregation: // Simple min aggregator. 
It calculates the minimum between existing values and a new sample - f = func(existingValue float64, newValue float64) float64 { + f = func(b *SimpleAggregator, serie string, pos int, newValue float64) { + existingValue := b.ss[serie].Values[pos] if math.IsNaN(existingValue) || newValue < existingValue { - return newValue + b.ss[serie].Values[pos] = newValue } - return existingValue } initWithNaN = true case maxAggregation: // Simple max aggregator. It calculates the maximum between existing values and a new sample - f = func(existingValue float64, newValue float64) float64 { + f = func(b *SimpleAggregator, serie string, pos int, newValue float64) { + existingValue := b.ss[serie].Values[pos] if math.IsNaN(existingValue) || newValue > existingValue { - return newValue + b.ss[serie].Values[pos] = newValue } - return existingValue } initWithNaN = true + + case avgAggregation: + // Simple average aggregator. It calculates the average between existing values and a new sample + f = func(b *SimpleAggregator, serie string, pos int, inc float64) { + b.ssCounter[serie][pos]++ + mean := b.ss[serie].Values[pos] + count := b.ssCounter[serie][pos] + compensation := b.ssCompensation[serie][pos] + + mean, c := averageInc(mean, inc, count, compensation) + + b.ssCompensation[serie][pos] = c + b.ss[serie].Values[pos] = mean + } + initWithNaN = true + default: // Simple addition aggregator. It adds existing values with the new sample. 
- f = func(existingValue float64, newValue float64) float64 { return existingValue + newValue } + f = func(b *SimpleAggregator, serie string, pos int, newValue float64) { + b.ss[serie].Values[pos] += newValue + } initWithNaN = false } @@ -1145,6 +1219,7 @@ func NewSimpleCombiner(req *tempopb.QueryRangeRequest, op SimpleAggregationOp) * step: req.Step, aggregationFunc: f, initWithNaN: initWithNaN, + initAvg: (op == avgAggregation), } } @@ -1175,12 +1250,21 @@ func (b *SimpleAggregator) Combine(in []*tempopb.TimeSeries) { } b.ss[ts.PromLabels] = existing + if b.initAvg { + if b.ssCounter == nil { + b.ssCounter = map[string]map[int]float64{} + b.ssCompensation = map[string]map[int]float64{} + } + + b.ssCounter[ts.PromLabels] = make(map[int]float64, b.len) + b.ssCompensation[ts.PromLabels] = make(map[int]float64, b.len) + } } for _, sample := range ts.Samples { j := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) if j >= 0 && j < len(existing.Values) { - existing.Values[j] = b.aggregationFunc(existing.Values[j], sample.Value) + b.aggregationFunc(b, ts.PromLabels, j, sample.Value) } } diff --git a/pkg/traceql/engine_metrics_test.go b/pkg/traceql/engine_metrics_test.go index 0656de28a9e..7cc8d6fdc2c 100644 --- a/pkg/traceql/engine_metrics_test.go +++ b/pkg/traceql/engine_metrics_test.go @@ -617,6 +617,147 @@ func TestMinOverTimeForSpanAttribute(t *testing.T) { } } +func TestAvgOverTimeForDuration(t *testing.T) { + req := &tempopb.QueryRangeRequest{ + Start: uint64(1 * time.Second), + End: uint64(3 * time.Second), + Step: uint64(1 * time.Second), + Query: "{ } | avg_over_time(duration) by (span.foo)", + } + + // A variety of spans across times, durations, and series. 
All durations are powers of 2 for simplicity + in := []Span{ + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithDuration(500), + + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithDuration(200), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithDuration(300), + } + + result := runTraceQLMetric(t, req, in) + + fooBaz := result[`{span.foo="baz"}`] + fooBar := result[`{span.foo="bar"}`] + + // We cannot compare with require.Equal because NaN != NaN + assert.True(t, math.IsNaN(fooBaz.Values[0])) + assert.True(t, math.IsNaN(fooBaz.Values[1])) + // assert.Equal(t, 300/float64(time.Second), fooBaz.Values[2]) + + // foo.bar = (0.000000128, 0.000000128, NaN) + // assert.Equal(t, 100/float64(time.Second), fooBar.Values[0]) + // assert.Equal(t, 400/float64(time.Second), fooBar.Values[1]) + assert.True(t, math.IsNaN(fooBar.Values[2])) +} + +func TestAvgOverTimeForSpanAttribute(t *testing.T) { + req := &tempopb.QueryRangeRequest{ + Start: uint64(1 * time.Second), + End: uint64(3 * time.Second), + Step: uint64(1 * time.Second), + Query: "{ } | avg_over_time(span.http.status_code) by (span.foo)", + } + + // A variety of spans across times, durations, 
and series. All durations are powers of 2 for simplicity + in := []Span{ + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(128), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 404).WithDuration(256), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(512), + + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(256), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(64), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(256), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(8), + + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 200).WithDuration(512), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 400).WithDuration(1024), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 300).WithDuration(512), + } + + in2 := []Span{ + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(128), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(256), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(512), + + 
newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(256), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(64), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(256), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(8), + + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 200).WithDuration(512), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 200).WithDuration(1024), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 200).WithDuration(512), + } + + result := runTraceQLMetric(t, req, in, in2) + + fooBaz := result[`{span.foo="baz"}`] + fooBar := result[`{span.foo="bar"}`] + + // Alas,we cannot compare with require.Equal because NaN != NaN + // foo.baz = (NaN, NaN, 250) + assert.True(t, math.IsNaN(fooBaz.Values[0])) + assert.True(t, math.IsNaN(fooBaz.Values[1])) + assert.Equal(t, 250.0, fooBaz.Values[2]) + + // foo.bar = (234,200, NaN) + assert.Equal(t, 234.0, fooBar.Values[0]) + assert.Equal(t, 200.0, fooBar.Values[1]) + assert.True(t, math.IsNaN(fooBar.Values[2])) + + // Test that NaN values are not included in the samples after casting to proto + ts := result.ToProto(req) + fooBarSamples := []tempopb.Sample{{TimestampMs: 1000, Value: 234}, {TimestampMs: 2000, Value: 200}} + fooBazSamples := []tempopb.Sample{{TimestampMs: 3000, Value: 250}} + + for _, s := range ts { + if s.PromLabels == "{span.foo=\"bar\"}" { + assert.Equal(t, fooBarSamples, s.Samples) + } else { + assert.Equal(t, fooBazSamples, s.Samples) + } + } +} + +func 
TestAvgOverTimeWithNoMatch(t *testing.T) { + req := &tempopb.QueryRangeRequest{ + Start: uint64(1 * time.Second), + End: uint64(3 * time.Second), + Step: uint64(1 * time.Second), + Query: "{ } | avg_over_time(span.buu)", + } + + // A variety of spans across times, durations, and series. All durations are powers of 2 for simplicity + in := []Span{ + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(128), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 404).WithDuration(256), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(512), + + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(256), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(64), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(256), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200).WithDuration(8), + + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 201).WithDuration(512), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 401).WithDuration(1024), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 500).WithDuration(512), + } + + result := runTraceQLMetric(t, req, in) + + // Test that empty timeseries are not included + ts := result.ToProto(req) + + assert.True(t, len(ts) == 0) +} + func TestMaxOverTimeForDuration(t *testing.T) { req := 
&tempopb.QueryRangeRequest{ Start: uint64(1 * time.Second), diff --git a/pkg/traceql/enum_aggregates.go b/pkg/traceql/enum_aggregates.go index b5169bf486d..89afe173e8a 100644 --- a/pkg/traceql/enum_aggregates.go +++ b/pkg/traceql/enum_aggregates.go @@ -56,6 +56,7 @@ const ( metricsAggregateCountOverTime metricsAggregateMinOverTime metricsAggregateMaxOverTime + metricsAggregateAvgOverTime metricsAggregateQuantileOverTime metricsAggregateHistogramOverTime ) @@ -70,6 +71,8 @@ func (a MetricsAggregateOp) String() string { return "min_over_time" case metricsAggregateMaxOverTime: return "max_over_time" + case metricsAggregateAvgOverTime: + return "avg_over_time" case metricsAggregateQuantileOverTime: return "quantile_over_time" case metricsAggregateHistogramOverTime: diff --git a/pkg/traceql/expr.y b/pkg/traceql/expr.y index b0c1e6a35a6..69faadaa8bb 100644 --- a/pkg/traceql/expr.y +++ b/pkg/traceql/expr.y @@ -100,7 +100,7 @@ import ( COUNT AVG MAX MIN SUM BY COALESCE SELECT END_ATTRIBUTE - RATE COUNT_OVER_TIME MIN_OVER_TIME MAX_OVER_TIME QUANTILE_OVER_TIME HISTOGRAM_OVER_TIME COMPARE + RATE COUNT_OVER_TIME MIN_OVER_TIME MAX_OVER_TIME AVG_OVER_TIME QUANTILE_OVER_TIME HISTOGRAM_OVER_TIME COMPARE WITH // Operators are listed with increasing precedence. 
@@ -302,6 +302,8 @@ metricsAggregation: | MIN_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateMinOverTime, $3, $7) } | MAX_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateMaxOverTime, $3, nil) } | MAX_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateMaxOverTime, $3, $7) } + | AVG_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateAvgOverTime, $3, nil) } + | AVG_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateAvgOverTime, $3, $7) } | QUANTILE_OVER_TIME OPEN_PARENS attribute COMMA numericList CLOSE_PARENS { $$ = newMetricsAggregateQuantileOverTime($3, $5, nil) } | QUANTILE_OVER_TIME OPEN_PARENS attribute COMMA numericList CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateQuantileOverTime($3, $5, $9) } | HISTOGRAM_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateHistogramOverTime, $3, nil) } diff --git a/pkg/traceql/expr.y.go b/pkg/traceql/expr.y.go index 645193e7538..2846952a876 100644 --- a/pkg/traceql/expr.y.go +++ b/pkg/traceql/expr.y.go @@ -121,40 +121,41 @@ const RATE = 57408 const COUNT_OVER_TIME = 57409 const MIN_OVER_TIME = 57410 const MAX_OVER_TIME = 57411 -const QUANTILE_OVER_TIME = 57412 -const HISTOGRAM_OVER_TIME = 57413 -const COMPARE = 57414 -const WITH = 57415 -const PIPE = 57416 -const AND = 57417 -const OR = 57418 -const EQ = 57419 -const NEQ = 57420 -const LT = 57421 -const LTE = 57422 -const GT = 57423 -const GTE = 57424 -const NRE = 57425 -const RE = 57426 -const DESC = 57427 -const ANCE = 57428 -const SIBL = 57429 -const NOT_CHILD = 57430 -const NOT_PARENT = 57431 -const NOT_DESC = 57432 -const NOT_ANCE = 57433 
-const UNION_CHILD = 57434 -const UNION_PARENT = 57435 -const UNION_DESC = 57436 -const UNION_ANCE = 57437 -const UNION_SIBL = 57438 -const ADD = 57439 -const SUB = 57440 -const NOT = 57441 -const MUL = 57442 -const DIV = 57443 -const MOD = 57444 -const POW = 57445 +const AVG_OVER_TIME = 57412 +const QUANTILE_OVER_TIME = 57413 +const HISTOGRAM_OVER_TIME = 57414 +const COMPARE = 57415 +const WITH = 57416 +const PIPE = 57417 +const AND = 57418 +const OR = 57419 +const EQ = 57420 +const NEQ = 57421 +const LT = 57422 +const LTE = 57423 +const GT = 57424 +const GTE = 57425 +const NRE = 57426 +const RE = 57427 +const DESC = 57428 +const ANCE = 57429 +const SIBL = 57430 +const NOT_CHILD = 57431 +const NOT_PARENT = 57432 +const NOT_DESC = 57433 +const NOT_ANCE = 57434 +const UNION_CHILD = 57435 +const UNION_PARENT = 57436 +const UNION_DESC = 57437 +const UNION_ANCE = 57438 +const UNION_SIBL = 57439 +const ADD = 57440 +const SUB = 57441 +const NOT = 57442 +const MUL = 57443 +const DIV = 57444 +const MOD = 57445 +const POW = 57446 var yyToknames = [...]string{ "$end", @@ -226,6 +227,7 @@ var yyToknames = [...]string{ "COUNT_OVER_TIME", "MIN_OVER_TIME", "MAX_OVER_TIME", + "AVG_OVER_TIME", "QUANTILE_OVER_TIME", "HISTOGRAM_OVER_TIME", "COMPARE", @@ -272,166 +274,167 @@ var yyExca = [...]int{ -1, 1, 1, -1, -2, 0, - -1, 298, + -1, 300, 13, 86, -2, 94, } const yyPrivate = 57344 -const yyLast = 993 +const yyLast = 994 var yyAct = [...]int{ - 101, 5, 6, 8, 7, 100, 98, 12, 284, 18, - 247, 67, 90, 228, 77, 336, 13, 205, 348, 30, - 94, 99, 236, 237, 238, 247, 70, 29, 229, 296, - 2, 347, 153, 154, 157, 155, 329, 234, 235, 66, - 236, 237, 238, 247, 87, 88, 89, 90, 204, 328, - 185, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 202, 333, 327, 324, - 323, 78, 79, 80, 81, 82, 83, 322, 211, 85, - 86, 321, 87, 88, 89, 90, 74, 75, 76, 77, - 383, 85, 86, 232, 87, 88, 89, 90, 231, 366, - 362, 219, 221, 222, 223, 224, 225, 226, 361, 345, - 332, 352, 227, 351, 230, 
390, 250, 251, 252, 239, - 240, 241, 242, 243, 244, 246, 245, 355, 204, 248, - 249, 239, 240, 241, 242, 243, 244, 246, 245, 234, - 235, 256, 236, 237, 238, 247, 276, 274, 275, 393, - 303, 234, 235, 394, 236, 237, 238, 247, 356, 331, - 72, 73, 277, 74, 75, 76, 77, 293, 279, 280, - 281, 282, 248, 249, 239, 240, 241, 242, 243, 244, - 246, 245, 294, 354, 257, 258, 272, 209, 293, 205, - 389, 303, 387, 303, 234, 235, 353, 236, 237, 238, - 247, 273, 386, 303, 344, 262, 338, 153, 154, 157, - 155, 337, 263, 278, 264, 298, 378, 303, 209, 265, - 208, 248, 249, 239, 240, 241, 242, 243, 244, 246, - 245, 377, 303, 375, 376, 373, 372, 294, 357, 358, - 334, 335, 300, 234, 235, 388, 236, 237, 238, 247, - 302, 303, 17, 374, 186, 304, 305, 306, 307, 308, - 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, - 319, 85, 86, 371, 87, 88, 89, 90, 370, 360, - 359, 295, 78, 79, 80, 81, 82, 83, 292, 232, - 232, 232, 232, 291, 231, 231, 231, 231, 290, 67, - 343, 67, 85, 86, 232, 87, 88, 89, 90, 231, - 230, 230, 230, 230, 70, 289, 70, 339, 340, 341, - 342, 288, 287, 286, 212, 230, 168, 151, 150, 300, - 149, 148, 346, 78, 79, 80, 81, 82, 83, 147, - 146, 92, 91, 350, 349, 17, 392, 153, 154, 157, - 155, 84, 385, 72, 73, 367, 74, 75, 76, 77, - 232, 232, 285, 71, 326, 231, 231, 143, 144, 145, - 325, 232, 232, 368, 369, 232, 231, 231, 382, 381, - 231, 230, 230, 261, 379, 380, 365, 364, 384, 232, - 260, 259, 230, 230, 231, 255, 230, 254, 253, 28, - 283, 363, 391, 69, 16, 102, 103, 104, 108, 131, - 230, 93, 95, 4, 152, 107, 105, 106, 110, 109, - 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, - 121, 122, 124, 123, 125, 126, 10, 127, 128, 129, - 130, 156, 1, 0, 0, 0, 134, 132, 133, 138, - 139, 140, 135, 141, 136, 142, 137, 330, 0, 102, - 103, 104, 108, 131, 0, 0, 95, 0, 0, 107, - 105, 106, 110, 109, 111, 112, 113, 114, 115, 116, - 117, 118, 119, 120, 121, 122, 124, 123, 125, 126, - 0, 127, 128, 129, 130, 320, 68, 11, 96, 97, - 134, 132, 133, 138, 139, 140, 135, 141, 136, 142, - 137, 0, 0, 0, 0, 0, 0, 
0, 0, 248, - 249, 239, 240, 241, 242, 243, 244, 246, 245, 301, - 0, 0, 0, 0, 0, 0, 0, 233, 0, 0, - 0, 234, 235, 0, 236, 237, 238, 247, 0, 0, - 0, 0, 96, 97, 0, 0, 0, 248, 249, 239, - 240, 241, 242, 243, 244, 246, 245, 0, 210, 213, - 214, 215, 216, 217, 218, 206, 0, 0, 0, 234, - 235, 0, 236, 237, 238, 247, 0, 0, 0, 0, - 0, 248, 249, 239, 240, 241, 242, 243, 244, 246, - 245, 248, 249, 239, 240, 241, 242, 243, 244, 246, - 245, 0, 0, 234, 235, 0, 236, 237, 238, 247, - 0, 0, 0, 234, 235, 0, 236, 237, 238, 247, - 19, 20, 21, 0, 17, 203, 165, 48, 53, 0, + 101, 5, 6, 8, 7, 98, 100, 285, 18, 12, + 248, 67, 90, 77, 338, 229, 206, 230, 205, 13, + 298, 2, 94, 30, 99, 237, 238, 239, 248, 70, + 66, 29, 153, 154, 157, 155, 235, 236, 205, 237, + 238, 239, 248, 85, 86, 390, 87, 88, 89, 90, + 186, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 335, 351, 350, + 371, 78, 79, 80, 81, 82, 83, 212, 72, 73, + 348, 74, 75, 76, 77, 87, 88, 89, 90, 331, + 367, 85, 86, 233, 87, 88, 89, 90, 330, 232, + 206, 329, 210, 326, 220, 222, 223, 224, 225, 226, + 227, 334, 402, 325, 228, 324, 323, 231, 251, 252, + 253, 240, 241, 242, 243, 244, 245, 247, 246, 366, + 249, 250, 240, 241, 242, 243, 244, 245, 247, 246, + 365, 235, 236, 355, 237, 238, 239, 248, 74, 75, + 76, 77, 235, 236, 333, 237, 238, 239, 248, 354, + 257, 398, 210, 275, 276, 401, 305, 359, 295, 277, + 280, 281, 282, 283, 249, 250, 240, 241, 242, 243, + 244, 245, 247, 246, 360, 278, 296, 85, 86, 295, + 87, 88, 89, 90, 208, 358, 235, 236, 273, 237, + 238, 239, 248, 258, 259, 397, 305, 357, 153, 154, + 157, 155, 356, 274, 347, 300, 340, 249, 250, 240, + 241, 242, 243, 244, 245, 247, 246, 78, 79, 80, + 81, 82, 83, 339, 302, 395, 305, 394, 305, 235, + 236, 296, 237, 238, 239, 248, 279, 85, 86, 209, + 87, 88, 89, 90, 393, 305, 384, 305, 306, 307, + 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, + 318, 319, 320, 321, 383, 305, 381, 382, 396, 72, + 73, 380, 74, 75, 76, 77, 379, 378, 361, 362, + 233, 233, 233, 
233, 233, 377, 232, 232, 232, 232, + 232, 67, 376, 67, 346, 375, 233, 341, 342, 343, + 344, 345, 232, 364, 231, 231, 231, 231, 231, 70, + 363, 70, 302, 349, 336, 337, 304, 305, 297, 17, + 231, 187, 294, 263, 78, 79, 80, 81, 82, 83, + 264, 293, 265, 292, 353, 352, 291, 266, 290, 289, + 153, 154, 157, 155, 72, 73, 288, 74, 75, 76, + 77, 287, 213, 169, 233, 233, 151, 150, 149, 148, + 232, 232, 147, 146, 92, 91, 233, 233, 233, 373, + 374, 233, 232, 232, 232, 17, 84, 232, 231, 231, + 400, 385, 386, 387, 389, 388, 391, 233, 71, 392, + 231, 231, 231, 232, 267, 231, 268, 270, 271, 372, + 269, 286, 399, 102, 103, 104, 108, 131, 272, 93, + 95, 231, 328, 107, 105, 106, 110, 109, 111, 112, + 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, + 124, 123, 125, 126, 327, 127, 128, 129, 130, 68, + 11, 143, 144, 145, 134, 132, 133, 138, 139, 140, + 135, 141, 136, 142, 137, 332, 370, 369, 102, 103, + 104, 108, 131, 262, 261, 95, 260, 256, 107, 105, + 106, 110, 109, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 124, 123, 125, 126, 255, + 127, 128, 129, 130, 322, 254, 28, 96, 97, 134, + 132, 133, 138, 139, 140, 135, 141, 136, 142, 137, + 284, 211, 214, 215, 216, 217, 218, 219, 249, 250, + 240, 241, 242, 243, 244, 245, 247, 246, 303, 368, + 69, 16, 4, 152, 10, 156, 234, 1, 0, 0, + 235, 236, 0, 237, 238, 239, 248, 0, 0, 0, + 0, 0, 96, 97, 0, 0, 0, 249, 250, 240, + 241, 242, 243, 244, 245, 247, 246, 0, 0, 0, + 0, 0, 0, 0, 207, 0, 0, 0, 0, 235, + 236, 0, 237, 238, 239, 248, 0, 0, 0, 0, + 0, 249, 250, 240, 241, 242, 243, 244, 245, 247, + 246, 249, 250, 240, 241, 242, 243, 244, 245, 247, + 246, 0, 0, 235, 236, 0, 237, 238, 239, 248, + 0, 0, 0, 235, 236, 0, 237, 238, 239, 248, + 19, 20, 21, 0, 17, 204, 166, 48, 53, 0, 0, 50, 0, 49, 0, 57, 0, 51, 52, 54, 55, 56, 59, 58, 60, 61, 64, 63, 62, 48, 53, 0, 0, 50, 0, 49, 0, 57, 0, 51, 52, 54, 55, 56, 59, 58, 60, 61, 64, 63, - 62, 23, 26, 24, 25, 27, 14, 166, 15, 0, - 158, 159, 160, 161, 162, 163, 164, 31, 36, 0, - 0, 33, 0, 32, 0, 42, 
0, 34, 35, 37, - 38, 39, 40, 41, 43, 44, 45, 46, 47, 31, - 36, 0, 22, 33, 0, 32, 0, 42, 0, 34, - 35, 37, 38, 39, 40, 41, 43, 44, 45, 46, - 47, 19, 20, 21, 0, 17, 0, 165, 0, 19, - 20, 21, 0, 17, 0, 299, 0, 19, 20, 21, - 50, 17, 49, 297, 57, 0, 51, 52, 54, 55, - 56, 59, 58, 60, 61, 64, 63, 62, 0, 207, - 0, 0, 0, 0, 19, 20, 21, 0, 17, 0, - 9, 0, 23, 26, 24, 25, 27, 14, 166, 15, - 23, 26, 24, 25, 27, 14, 0, 15, 23, 26, - 24, 25, 27, 14, 0, 15, 0, 0, 0, 19, - 20, 21, 0, 17, 0, 165, 19, 20, 21, 0, - 0, 0, 220, 22, 0, 23, 26, 24, 25, 27, - 14, 22, 15, 0, 33, 0, 32, 0, 42, 22, + 62, 23, 26, 24, 25, 27, 14, 167, 15, 0, + 158, 159, 160, 161, 162, 163, 164, 165, 31, 36, + 0, 0, 33, 0, 32, 0, 42, 0, 34, 35, + 37, 38, 39, 40, 41, 43, 44, 45, 46, 47, + 31, 36, 0, 22, 33, 0, 32, 0, 42, 0, 34, 35, 37, 38, 39, 40, 41, 43, 44, 45, - 46, 47, 0, 72, 73, 0, 74, 75, 76, 77, - 23, 26, 24, 25, 27, 0, 22, 23, 26, 24, - 25, 27, 0, 266, 131, 267, 269, 270, 0, 268, - 0, 0, 0, 0, 0, 0, 0, 271, 0, 0, - 65, 3, 118, 119, 120, 121, 122, 124, 123, 125, - 126, 22, 127, 128, 129, 130, 0, 0, 22, 0, - 0, 134, 132, 133, 138, 139, 140, 135, 141, 136, - 142, 137, 167, 169, 170, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, 184, 102, - 103, 104, 108, 0, 0, 0, 212, 0, 0, 107, - 105, 106, 110, 109, 111, 112, 113, 114, 115, 116, - 117, 102, 103, 104, 108, 0, 0, 0, 0, 0, - 0, 107, 105, 106, 110, 109, 111, 112, 113, 114, - 115, 116, 117, + 46, 47, 19, 20, 21, 0, 17, 0, 166, 0, + 19, 20, 21, 0, 17, 0, 301, 0, 19, 20, + 21, 50, 17, 49, 299, 57, 0, 51, 52, 54, + 55, 56, 59, 58, 60, 61, 64, 63, 62, 0, + 0, 0, 0, 0, 0, 0, 19, 20, 21, 0, + 17, 0, 9, 23, 26, 24, 25, 27, 14, 167, + 15, 23, 26, 24, 25, 27, 14, 0, 15, 23, + 26, 24, 25, 27, 14, 33, 15, 32, 0, 42, + 0, 34, 35, 37, 38, 39, 40, 41, 43, 44, + 45, 46, 47, 0, 0, 22, 0, 23, 26, 24, + 25, 27, 14, 22, 15, 131, 19, 20, 21, 0, + 17, 22, 166, 19, 20, 21, 0, 0, 0, 221, + 0, 0, 0, 118, 119, 120, 121, 122, 124, 123, + 125, 126, 0, 127, 128, 129, 
130, 65, 3, 22, + 0, 0, 134, 132, 133, 138, 139, 140, 135, 141, + 136, 142, 137, 0, 0, 0, 0, 23, 26, 24, + 25, 27, 0, 0, 23, 26, 24, 25, 27, 168, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, + 102, 103, 104, 108, 0, 0, 22, 213, 0, 0, + 107, 105, 106, 110, 109, 111, 112, 113, 114, 115, + 116, 117, 102, 103, 104, 108, 0, 0, 0, 0, + 0, 0, 107, 105, 106, 110, 109, 111, 112, 113, + 114, 115, 116, 117, } var yyPact = [...]int{ - 778, -46, -55, 644, -1000, 584, -1000, -1000, -1000, 778, - -1000, 256, -1000, -6, 330, 329, -1000, 400, -1000, -1000, - -1000, -1000, 361, 328, 327, 319, 318, 316, -1000, 315, - 624, 314, 314, 314, 314, 314, 314, 314, 314, 314, - 314, 314, 314, 314, 314, 314, 314, 314, 242, 242, - 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, - 242, 242, 242, 242, 242, 622, 115, 562, 766, 207, - 205, 944, 312, 312, 312, 312, 312, 312, -1000, -1000, - -1000, -1000, -1000, -1000, 820, 820, 820, 820, 820, 820, - 820, 454, 875, -1000, 526, 454, 454, 454, -1000, -1000, + 790, -43, -52, 654, -1000, 593, -1000, -1000, -1000, 790, + -1000, 256, -1000, -7, 363, 362, -1000, 408, -1000, -1000, + -1000, -1000, 445, 361, 360, 357, 356, 355, -1000, 354, + 634, 351, 351, 351, 351, 351, 351, 351, 351, 351, + 351, 351, 351, 351, 351, 351, 351, 351, 319, 319, + 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, + 319, 319, 319, 319, 319, 632, 25, 571, 181, 236, + 149, 945, 350, 350, 350, 350, 350, 350, -1000, -1000, + -1000, -1000, -1000, -1000, 857, 857, 857, 857, 857, 857, + 857, 463, 846, -1000, 535, 463, 463, 463, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 394, 393, 391, 137, 387, 386, 379, 178, 856, - 157, 105, 117, -1000, -1000, -1000, 200, 454, 454, 454, - 454, 358, -1000, 584, -1000, -1000, -1000, 
-1000, 311, 310, - 309, 303, 286, 281, 276, 813, 269, 765, 751, -1000, - -1000, -1000, -1000, 765, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 681, 242, -1000, -1000, -1000, - -1000, 681, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 735, -1000, -1000, -1000, -1000, - 63, -1000, 743, -14, -14, -89, -89, -89, -89, -18, - 820, -56, -56, -91, -91, -91, -91, 516, 237, -1000, - -1000, -1000, -1000, -1000, 454, 454, 454, 454, 454, 454, - 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, - 482, -78, -78, 16, 12, 5, 4, 366, 360, 3, - -16, -29, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 444, - 146, 97, 54, 227, -1000, -62, 198, 193, 875, 875, - 875, 875, 335, 562, 174, 191, 35, 751, -1000, 743, - -57, -1000, -1000, 875, -78, -78, -93, -93, -93, -60, - -60, -60, -60, -60, -60, -60, -60, -93, 42, 42, - -1000, -1000, -1000, -1000, -1000, -34, -47, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 358, 966, 51, 49, 183, - 170, 113, 145, 225, -1000, 735, -1000, -1000, -1000, -1000, - -1000, 268, 267, 46, 38, 380, 37, -1000, 349, 875, - 875, 266, 261, 222, -1000, -1000, 241, 220, 218, 203, - 875, 875, 372, 28, 875, -1000, 346, -1000, -1000, 189, - 179, -1000, -1000, 233, 177, 101, -1000, -1000, 875, -1000, - 340, 136, 140, -1000, -1000, + -1000, 501, 495, 473, 156, 472, 470, 469, 306, 377, + 169, 121, 140, -1000, -1000, -1000, 233, 463, 463, 463, + 463, 407, -1000, 593, -1000, -1000, -1000, -1000, 349, 344, + 337, 336, 334, 331, 329, 320, 850, 316, 745, 762, + -1000, -1000, -1000, -1000, 745, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 691, 319, -1000, -1000, + -1000, -1000, 691, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 746, -1000, -1000, -1000, + -1000, -20, -1000, 754, 47, 47, -91, -91, -91, -91, + -55, 857, -16, -16, -92, -92, 
-92, -92, 525, 313, + -1000, -1000, -1000, -1000, -1000, 463, 463, 463, 463, 463, + 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, + 463, 491, -76, -76, 51, 50, 48, 38, 440, 418, + 36, 33, 24, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 452, 141, 98, 54, 311, -1000, -64, 220, 203, 846, + 846, 846, 846, 846, 375, 571, 89, 201, 5, 762, + -1000, 754, -59, -1000, -1000, 846, -76, -76, -94, -94, + -94, -62, -62, -62, -62, -62, -62, -62, -62, -94, + 43, 43, -1000, -1000, -1000, -1000, -1000, 4, 3, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 407, 967, 97, + 81, 199, 194, 182, 153, 171, 275, -1000, 746, -1000, + -1000, -1000, -1000, -1000, 308, 301, 78, 67, 28, 460, + 8, -1000, 403, 846, 846, 293, 290, 283, 273, -1000, + -1000, 269, 263, 261, 243, 846, 846, 846, 388, -17, + 846, -1000, 393, -1000, -1000, 241, 224, 222, -1000, -1000, + 266, 192, 147, -1000, -1000, -1000, 846, -1000, 384, 152, + 99, -1000, -1000, } var yyPgo = [...]int{ - 0, 442, 4, 441, 3, 13, 1, 900, 436, 29, - 7, 2, 351, 414, 413, 496, 16, 404, 403, 9, - 20, 6, 21, 5, 0, 28, 401, 8, 400, 399, + 0, 547, 4, 545, 3, 15, 1, 887, 544, 20, + 9, 2, 386, 543, 542, 449, 19, 541, 540, 8, + 22, 5, 24, 6, 0, 17, 539, 7, 520, 506, } var yyR1 = [...]int{ @@ -447,15 +450,15 @@ var yyR1 = [...]int{ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 19, 19, 19, 19, 19, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, - 13, 27, 29, 28, 28, 20, 20, 20, 20, 20, + 13, 13, 13, 27, 29, 28, 28, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, - 22, 22, 22, 22, 22, 22, 22, 24, 24, 24, + 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 23, 23, 23, 23, 23, 23, 23, - 23, 23, + 24, 24, 
24, 24, 24, 23, 23, 23, 23, 23, + 23, 23, 23, 23, } var yyR2 = [...]int{ @@ -470,59 +473,60 @@ var yyR2 = [...]int{ 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 2, 2, 2, 3, 4, 4, 4, 4, 3, 7, 3, 7, - 4, 8, 4, 8, 6, 10, 4, 8, 4, 6, - 10, 3, 4, 1, 3, 3, 3, 3, 3, 3, + 4, 8, 4, 8, 4, 8, 6, 10, 4, 8, + 4, 6, 10, 3, 4, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, + 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 3, 3, 3, 3, 4, 4, 3, - 3, 3, + 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, + 4, 3, 3, 3, } var yyChk = [...]int{ -1000, -1, -9, -7, -14, -6, -11, -2, -4, 12, -8, -15, -10, -16, 62, 64, -17, 10, -19, 6, - 7, 8, 98, 57, 59, 60, 58, 61, -29, 73, - 74, 75, 81, 79, 85, 86, 76, 87, 88, 89, - 90, 91, 83, 92, 93, 94, 95, 96, 75, 81, - 79, 85, 86, 76, 87, 88, 89, 83, 91, 90, - 92, 93, 96, 95, 94, -7, -9, -6, -15, -18, - -16, -12, 97, 98, 100, 101, 102, 103, 77, 78, - 79, 80, 81, 82, -12, 97, 98, 100, 101, 102, - 103, 12, 12, 11, -20, 12, 98, 99, -21, -22, + 7, 8, 99, 57, 59, 60, 58, 61, -29, 74, + 75, 76, 82, 80, 86, 87, 77, 88, 89, 90, + 91, 92, 84, 93, 94, 95, 96, 97, 76, 82, + 80, 86, 87, 77, 88, 89, 90, 84, 92, 91, + 93, 94, 97, 96, 95, -7, -9, -6, -15, -18, + -16, -12, 98, 99, 101, 102, 103, 104, 78, 79, + 80, 81, 82, 83, -12, 98, 99, 101, 102, 103, + 104, 12, 12, 11, -20, 12, 99, 100, -21, -22, -23, -24, 5, 6, 7, 16, 17, 15, 8, 19, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 32, 34, 35, 37, 38, 39, 40, 9, 47, 48, 46, 52, 54, 56, 49, 50, 51, 53, 55, 6, 7, 8, 12, 12, 12, 12, 12, 12, -13, -6, -11, -2, -3, -4, 66, 67, - 68, 69, 70, 71, 72, 12, 63, -7, 12, -7, + 68, 69, 70, 71, 72, 73, 12, 63, -7, 12, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, - -7, -7, -7, -7, -7, -6, 12, -6, -6, -6, + -7, -7, -7, -7, -7, -7, -6, 12, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, - -6, -6, -6, 13, 13, 74, 
13, 13, 13, 13, - -15, -21, 12, -15, -15, -15, -15, -15, -15, -16, - 12, -16, -16, -16, -16, -16, -16, -20, -5, -25, - -22, -23, -24, 11, 97, 98, 100, 101, 102, 77, - 78, 79, 80, 81, 82, 84, 83, 103, 75, 76, - -20, -20, -20, 4, 4, 4, 4, 47, 48, 4, - 4, 4, 27, 34, 36, 41, 27, 29, 33, 30, - 31, 41, 29, 44, 42, 43, 29, 45, 13, -20, - -20, -20, -20, -28, -27, 4, 12, 12, 12, 12, - 12, 12, 12, -6, -16, 12, -9, 12, -19, 12, - -9, 13, 13, 14, -20, -20, -20, -20, -20, -20, + -6, -6, -6, -6, 13, 13, 75, 13, 13, 13, + 13, -15, -21, 12, -15, -15, -15, -15, -15, -15, + -16, 12, -16, -16, -16, -16, -16, -16, -20, -5, + -25, -22, -23, -24, 11, 98, 99, 101, 102, 103, + 78, 79, 80, 81, 82, 83, 85, 84, 104, 76, + 77, -20, -20, -20, 4, 4, 4, 4, 47, 48, + 4, 4, 4, 27, 34, 36, 41, 27, 29, 33, + 30, 31, 41, 29, 44, 42, 43, 29, 45, 13, + -20, -20, -20, -20, -28, -27, 4, 12, 12, 12, + 12, 12, 12, 12, 12, -6, -16, 12, -9, 12, + -19, 12, -9, 13, 13, 14, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, -20, - 13, 65, 65, 65, 65, 4, 4, 65, 65, 65, - 13, 13, 13, 13, 13, 14, 77, 13, 13, -25, - -25, -25, -25, -10, 13, 74, -25, 65, 65, -27, - -21, 62, 62, 13, 13, 14, 13, 13, 14, 12, - 12, 62, 62, -26, 7, 6, 62, 6, -5, -5, - 12, 12, 14, 13, 12, 13, 14, 13, 13, -5, - -5, 7, 6, 62, -5, 6, 13, 13, 12, 13, - 14, -5, 6, 13, 13, + -20, -20, 13, 65, 65, 65, 65, 4, 4, 65, + 65, 65, 13, 13, 13, 13, 13, 14, 78, 13, + 13, -25, -25, -25, -25, -25, -10, 13, 75, -25, + 65, 65, -27, -21, 62, 62, 13, 13, 13, 14, + 13, 13, 14, 12, 12, 62, 62, 62, -26, 7, + 6, 62, 6, -5, -5, 12, 12, 12, 14, 13, + 12, 13, 14, 13, 13, -5, -5, -5, 7, 6, + 62, -5, 6, 13, 13, 13, 12, 13, 14, -5, + 6, 13, 13, } var yyDef = [...]int{ @@ -535,37 +539,38 @@ var yyDef = [...]int{ 0, 0, 0, 0, 0, 0, 0, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 66, 0, 0, 0, 0, 144, 145, - 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, - 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, - 166, 167, 
168, 169, 170, 171, 172, 173, 174, 175, - 176, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 66, 0, 0, 0, 0, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 98, 99, 100, 0, 0, 0, 0, 0, 0, 4, 30, 31, 32, 33, 34, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 7, 0, 8, - 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 48, 0, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 6, 25, 0, 47, 77, 85, 87, - 75, 76, 0, 78, 79, 80, 81, 82, 83, 68, - 0, 88, 89, 90, 91, 92, 93, 0, 0, 41, - 38, 39, 40, 67, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 48, 0, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 6, 25, 0, 47, 77, 85, + 87, 75, 76, 0, 78, 79, 80, 81, 82, 83, + 68, 0, 88, 89, 90, 91, 92, 93, 0, 0, + 41, 38, 39, 40, 67, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 144, 145, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 101, + 0, 0, 0, 0, 0, 125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 142, 143, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 177, 178, 179, 180, 181, 182, 183, 184, - 185, 186, 187, 188, 189, 190, 191, 192, 101, 0, - 0, 0, 0, 0, 123, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, -2, 0, - 0, 35, 37, 0, 126, 127, 128, 129, 130, 131, + -2, 0, 0, 35, 37, 0, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, - 125, 193, 194, 195, 196, 0, 0, 199, 200, 201, - 102, 103, 104, 105, 122, 0, 0, 106, 108, 0, - 0, 0, 0, 0, 36, 0, 42, 197, 198, 124, - 121, 0, 0, 110, 112, 0, 116, 118, 0, 0, - 0, 0, 0, 0, 43, 44, 0, 0, 0, 0, - 0, 0, 0, 114, 0, 119, 0, 107, 109, 0, - 0, 45, 46, 0, 0, 0, 111, 113, 0, 117, - 0, 0, 0, 115, 120, + 142, 143, 127, 195, 196, 197, 198, 0, 0, 201, + 202, 203, 102, 103, 104, 105, 124, 0, 0, 106, + 108, 0, 0, 0, 0, 0, 0, 36, 
0, 42, + 199, 200, 126, 123, 0, 0, 110, 112, 114, 0, + 118, 120, 0, 0, 0, 0, 0, 0, 0, 43, + 44, 0, 0, 0, 0, 0, 0, 0, 0, 116, + 0, 121, 0, 107, 109, 0, 0, 0, 45, 46, + 0, 0, 0, 111, 113, 115, 0, 119, 0, 0, + 0, 117, 122, } var yyTok1 = [...]int{ @@ -583,7 +588,7 @@ var yyTok2 = [...]int{ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, + 102, 103, 104, } var yyTok3 = [...]int{ 0, @@ -1605,530 +1610,542 @@ yydefault: yyVAL.metricsAggregation = newMetricsAggregateWithAttr(metricsAggregateMaxOverTime, yyDollar[3].attribute, yyDollar[7].attributeList) } case 114: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] //line pkg/traceql/expr.y:305 { - yyVAL.metricsAggregation = newMetricsAggregateQuantileOverTime(yyDollar[3].attribute, yyDollar[5].numericList, nil) + yyVAL.metricsAggregation = newMetricsAggregateWithAttr(metricsAggregateAvgOverTime, yyDollar[3].attribute, nil) } case 115: - yyDollar = yyS[yypt-10 : yypt+1] + yyDollar = yyS[yypt-8 : yypt+1] //line pkg/traceql/expr.y:306 { - yyVAL.metricsAggregation = newMetricsAggregateQuantileOverTime(yyDollar[3].attribute, yyDollar[5].numericList, yyDollar[9].attributeList) + yyVAL.metricsAggregation = newMetricsAggregateWithAttr(metricsAggregateAvgOverTime, yyDollar[3].attribute, yyDollar[7].attributeList) } case 116: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] //line pkg/traceql/expr.y:307 { - yyVAL.metricsAggregation = newMetricsAggregateWithAttr(metricsAggregateHistogramOverTime, yyDollar[3].attribute, nil) + yyVAL.metricsAggregation = newMetricsAggregateQuantileOverTime(yyDollar[3].attribute, yyDollar[5].numericList, nil) } case 117: - yyDollar = yyS[yypt-8 : yypt+1] + yyDollar = yyS[yypt-10 : yypt+1] //line pkg/traceql/expr.y:308 { - yyVAL.metricsAggregation = newMetricsAggregateWithAttr(metricsAggregateHistogramOverTime, yyDollar[3].attribute, yyDollar[7].attributeList) + 
yyVAL.metricsAggregation = newMetricsAggregateQuantileOverTime(yyDollar[3].attribute, yyDollar[5].numericList, yyDollar[9].attributeList) } case 118: yyDollar = yyS[yypt-4 : yypt+1] //line pkg/traceql/expr.y:309 { - yyVAL.metricsAggregation = newMetricsCompare(yyDollar[3].spansetFilter, 10, 0, 0) + yyVAL.metricsAggregation = newMetricsAggregateWithAttr(metricsAggregateHistogramOverTime, yyDollar[3].attribute, nil) } case 119: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-8 : yypt+1] //line pkg/traceql/expr.y:310 { - yyVAL.metricsAggregation = newMetricsCompare(yyDollar[3].spansetFilter, yyDollar[5].staticInt, 0, 0) + yyVAL.metricsAggregation = newMetricsAggregateWithAttr(metricsAggregateHistogramOverTime, yyDollar[3].attribute, yyDollar[7].attributeList) } case 120: - yyDollar = yyS[yypt-10 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] //line pkg/traceql/expr.y:311 { - yyVAL.metricsAggregation = newMetricsCompare(yyDollar[3].spansetFilter, yyDollar[5].staticInt, yyDollar[7].staticInt, yyDollar[9].staticInt) + yyVAL.metricsAggregation = newMetricsCompare(yyDollar[3].spansetFilter, 10, 0, 0) } case 121: - yyDollar = yyS[yypt-3 : yypt+1] -//line pkg/traceql/expr.y:318 + yyDollar = yyS[yypt-6 : yypt+1] +//line pkg/traceql/expr.y:312 { - yyVAL.hint = newHint(yyDollar[1].staticStr, yyDollar[3].static) + yyVAL.metricsAggregation = newMetricsCompare(yyDollar[3].spansetFilter, yyDollar[5].staticInt, 0, 0) } case 122: - yyDollar = yyS[yypt-4 : yypt+1] -//line pkg/traceql/expr.y:322 + yyDollar = yyS[yypt-10 : yypt+1] +//line pkg/traceql/expr.y:313 { - yyVAL.hints = newHints(yyDollar[3].hintList) + yyVAL.metricsAggregation = newMetricsCompare(yyDollar[3].spansetFilter, yyDollar[5].staticInt, yyDollar[7].staticInt, yyDollar[9].staticInt) } case 123: - yyDollar = yyS[yypt-1 : yypt+1] -//line pkg/traceql/expr.y:326 + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:320 { - yyVAL.hintList = []*Hint{yyDollar[1].hint} + yyVAL.hint = newHint(yyDollar[1].staticStr, 
yyDollar[3].static) } case 124: - yyDollar = yyS[yypt-3 : yypt+1] -//line pkg/traceql/expr.y:327 + yyDollar = yyS[yypt-4 : yypt+1] +//line pkg/traceql/expr.y:324 { - yyVAL.hintList = append(yyDollar[1].hintList, yyDollar[3].hint) + yyVAL.hints = newHints(yyDollar[3].hintList) } case 125: - yyDollar = yyS[yypt-3 : yypt+1] -//line pkg/traceql/expr.y:335 + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:328 { - yyVAL.fieldExpression = yyDollar[2].fieldExpression + yyVAL.hintList = []*Hint{yyDollar[1].hint} } case 126: yyDollar = yyS[yypt-3 : yypt+1] -//line pkg/traceql/expr.y:336 +//line pkg/traceql/expr.y:329 { - yyVAL.fieldExpression = newBinaryOperation(OpAdd, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.hintList = append(yyDollar[1].hintList, yyDollar[3].hint) } case 127: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:337 { - yyVAL.fieldExpression = newBinaryOperation(OpSub, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = yyDollar[2].fieldExpression } case 128: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:338 { - yyVAL.fieldExpression = newBinaryOperation(OpMult, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpAdd, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 129: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:339 { - yyVAL.fieldExpression = newBinaryOperation(OpDiv, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpSub, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 130: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:340 { - yyVAL.fieldExpression = newBinaryOperation(OpMod, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpMult, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 131: yyDollar = yyS[yypt-3 : yypt+1] //line 
pkg/traceql/expr.y:341 { - yyVAL.fieldExpression = newBinaryOperation(OpEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpDiv, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 132: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:342 { - yyVAL.fieldExpression = newBinaryOperation(OpNotEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpMod, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 133: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:343 { - yyVAL.fieldExpression = newBinaryOperation(OpLess, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 134: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:344 { - yyVAL.fieldExpression = newBinaryOperation(OpLessEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpNotEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 135: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:345 { - yyVAL.fieldExpression = newBinaryOperation(OpGreater, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpLess, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 136: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:346 { - yyVAL.fieldExpression = newBinaryOperation(OpGreaterEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpLessEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 137: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:347 { - yyVAL.fieldExpression = newBinaryOperation(OpRegex, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = 
newBinaryOperation(OpGreater, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 138: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:348 { - yyVAL.fieldExpression = newBinaryOperation(OpNotRegex, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpGreaterEqual, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 139: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:349 { - yyVAL.fieldExpression = newBinaryOperation(OpPower, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpRegex, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 140: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:350 { - yyVAL.fieldExpression = newBinaryOperation(OpAnd, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpNotRegex, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 141: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:351 { - yyVAL.fieldExpression = newBinaryOperation(OpOr, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpPower, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 142: - yyDollar = yyS[yypt-2 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:352 { - yyVAL.fieldExpression = newUnaryOperation(OpSub, yyDollar[2].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpAnd, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 143: - yyDollar = yyS[yypt-2 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:353 { - yyVAL.fieldExpression = newUnaryOperation(OpNot, yyDollar[2].fieldExpression) + yyVAL.fieldExpression = newBinaryOperation(OpOr, yyDollar[1].fieldExpression, yyDollar[3].fieldExpression) } case 144: - yyDollar = yyS[yypt-1 : yypt+1] + yyDollar = yyS[yypt-2 : yypt+1] //line 
pkg/traceql/expr.y:354 { - yyVAL.fieldExpression = yyDollar[1].static + yyVAL.fieldExpression = newUnaryOperation(OpSub, yyDollar[2].fieldExpression) } case 145: - yyDollar = yyS[yypt-1 : yypt+1] + yyDollar = yyS[yypt-2 : yypt+1] //line pkg/traceql/expr.y:355 { - yyVAL.fieldExpression = yyDollar[1].intrinsicField + yyVAL.fieldExpression = newUnaryOperation(OpNot, yyDollar[2].fieldExpression) } case 146: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:356 { - yyVAL.fieldExpression = yyDollar[1].attributeField + yyVAL.fieldExpression = yyDollar[1].static } case 147: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:357 { - yyVAL.fieldExpression = yyDollar[1].scopedIntrinsicField + yyVAL.fieldExpression = yyDollar[1].intrinsicField } case 148: yyDollar = yyS[yypt-1 : yypt+1] -//line pkg/traceql/expr.y:364 +//line pkg/traceql/expr.y:358 { - yyVAL.static = NewStaticString(yyDollar[1].staticStr) + yyVAL.fieldExpression = yyDollar[1].attributeField } case 149: yyDollar = yyS[yypt-1 : yypt+1] -//line pkg/traceql/expr.y:365 +//line pkg/traceql/expr.y:359 { - yyVAL.static = NewStaticInt(yyDollar[1].staticInt) + yyVAL.fieldExpression = yyDollar[1].scopedIntrinsicField } case 150: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:366 { - yyVAL.static = NewStaticFloat(yyDollar[1].staticFloat) + yyVAL.static = NewStaticString(yyDollar[1].staticStr) } case 151: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:367 { - yyVAL.static = NewStaticBool(true) + yyVAL.static = NewStaticInt(yyDollar[1].staticInt) } case 152: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:368 { - yyVAL.static = NewStaticBool(false) + yyVAL.static = NewStaticFloat(yyDollar[1].staticFloat) } case 153: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:369 { - yyVAL.static = NewStaticNil() + yyVAL.static = NewStaticBool(true) } case 154: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:370 { - yyVAL.static = 
NewStaticDuration(yyDollar[1].staticDuration) + yyVAL.static = NewStaticBool(false) } case 155: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:371 { - yyVAL.static = NewStaticStatus(StatusOk) + yyVAL.static = NewStaticNil() } case 156: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:372 { - yyVAL.static = NewStaticStatus(StatusError) + yyVAL.static = NewStaticDuration(yyDollar[1].staticDuration) } case 157: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:373 { - yyVAL.static = NewStaticStatus(StatusUnset) + yyVAL.static = NewStaticStatus(StatusOk) } case 158: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:374 { - yyVAL.static = NewStaticKind(KindUnspecified) + yyVAL.static = NewStaticStatus(StatusError) } case 159: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:375 { - yyVAL.static = NewStaticKind(KindInternal) + yyVAL.static = NewStaticStatus(StatusUnset) } case 160: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:376 { - yyVAL.static = NewStaticKind(KindServer) + yyVAL.static = NewStaticKind(KindUnspecified) } case 161: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:377 { - yyVAL.static = NewStaticKind(KindClient) + yyVAL.static = NewStaticKind(KindInternal) } case 162: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:378 { - yyVAL.static = NewStaticKind(KindProducer) + yyVAL.static = NewStaticKind(KindServer) } case 163: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:379 { - yyVAL.static = NewStaticKind(KindConsumer) + yyVAL.static = NewStaticKind(KindClient) } case 164: yyDollar = yyS[yypt-1 : yypt+1] -//line pkg/traceql/expr.y:385 +//line pkg/traceql/expr.y:380 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicDuration) + yyVAL.static = NewStaticKind(KindProducer) } case 165: yyDollar = yyS[yypt-1 : yypt+1] -//line pkg/traceql/expr.y:386 +//line pkg/traceql/expr.y:381 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicChildCount) + yyVAL.static = 
NewStaticKind(KindConsumer) } case 166: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:387 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicName) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicDuration) } case 167: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:388 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicStatus) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicChildCount) } case 168: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:389 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicStatusMessage) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicName) } case 169: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:390 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicKind) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicStatus) } case 170: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:391 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicParent) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicStatusMessage) } case 171: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:392 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicTraceRootSpan) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicKind) } case 172: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:393 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicTraceRootService) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicParent) } case 173: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:394 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicTraceDuration) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicTraceRootSpan) } case 174: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:395 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicNestedSetLeft) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicTraceRootService) } case 175: yyDollar = yyS[yypt-1 : yypt+1] //line pkg/traceql/expr.y:396 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicNestedSetRight) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicTraceDuration) } case 176: yyDollar = yyS[yypt-1 : 
yypt+1] //line pkg/traceql/expr.y:397 { - yyVAL.intrinsicField = NewIntrinsic(IntrinsicNestedSetParent) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicNestedSetLeft) } case 177: - yyDollar = yyS[yypt-2 : yypt+1] -//line pkg/traceql/expr.y:402 + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:398 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicTraceDuration) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicNestedSetRight) } case 178: - yyDollar = yyS[yypt-2 : yypt+1] -//line pkg/traceql/expr.y:403 + yyDollar = yyS[yypt-1 : yypt+1] +//line pkg/traceql/expr.y:399 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicTraceRootSpan) + yyVAL.intrinsicField = NewIntrinsic(IntrinsicNestedSetParent) } case 179: yyDollar = yyS[yypt-2 : yypt+1] //line pkg/traceql/expr.y:404 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicTraceRootService) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicTraceDuration) } case 180: yyDollar = yyS[yypt-2 : yypt+1] //line pkg/traceql/expr.y:405 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicTraceID) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicTraceRootSpan) } case 181: yyDollar = yyS[yypt-2 : yypt+1] -//line pkg/traceql/expr.y:407 +//line pkg/traceql/expr.y:406 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicDuration) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicTraceRootService) } case 182: yyDollar = yyS[yypt-2 : yypt+1] -//line pkg/traceql/expr.y:408 +//line pkg/traceql/expr.y:407 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicName) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicTraceID) } case 183: yyDollar = yyS[yypt-2 : yypt+1] //line pkg/traceql/expr.y:409 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicKind) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicDuration) } case 184: yyDollar = yyS[yypt-2 : yypt+1] //line pkg/traceql/expr.y:410 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicStatus) + yyVAL.scopedIntrinsicField = 
NewIntrinsic(IntrinsicName) } case 185: yyDollar = yyS[yypt-2 : yypt+1] //line pkg/traceql/expr.y:411 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicStatusMessage) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicKind) } case 186: yyDollar = yyS[yypt-2 : yypt+1] //line pkg/traceql/expr.y:412 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicSpanID) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicStatus) } case 187: yyDollar = yyS[yypt-2 : yypt+1] -//line pkg/traceql/expr.y:414 +//line pkg/traceql/expr.y:413 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicEventName) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicStatusMessage) } case 188: yyDollar = yyS[yypt-2 : yypt+1] -//line pkg/traceql/expr.y:415 +//line pkg/traceql/expr.y:414 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicEventTimeSinceStart) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicSpanID) } case 189: yyDollar = yyS[yypt-2 : yypt+1] -//line pkg/traceql/expr.y:417 +//line pkg/traceql/expr.y:416 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicLinkTraceID) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicEventName) } case 190: yyDollar = yyS[yypt-2 : yypt+1] -//line pkg/traceql/expr.y:418 +//line pkg/traceql/expr.y:417 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicLinkSpanID) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicEventTimeSinceStart) } case 191: yyDollar = yyS[yypt-2 : yypt+1] -//line pkg/traceql/expr.y:420 +//line pkg/traceql/expr.y:419 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicInstrumentationName) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicLinkTraceID) } case 192: yyDollar = yyS[yypt-2 : yypt+1] -//line pkg/traceql/expr.y:421 +//line pkg/traceql/expr.y:420 { - yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicInstrumentationVersion) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicLinkSpanID) } case 193: - yyDollar = yyS[yypt-3 : yypt+1] -//line pkg/traceql/expr.y:425 + yyDollar = yyS[yypt-2 : 
yypt+1] +//line pkg/traceql/expr.y:422 { - yyVAL.attributeField = NewAttribute(yyDollar[2].staticStr) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicInstrumentationName) } case 194: - yyDollar = yyS[yypt-3 : yypt+1] -//line pkg/traceql/expr.y:426 + yyDollar = yyS[yypt-2 : yypt+1] +//line pkg/traceql/expr.y:423 { - yyVAL.attributeField = NewScopedAttribute(AttributeScopeResource, false, yyDollar[2].staticStr) + yyVAL.scopedIntrinsicField = NewIntrinsic(IntrinsicInstrumentationVersion) } case 195: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:427 { - yyVAL.attributeField = NewScopedAttribute(AttributeScopeSpan, false, yyDollar[2].staticStr) + yyVAL.attributeField = NewAttribute(yyDollar[2].staticStr) } case 196: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:428 { - yyVAL.attributeField = NewScopedAttribute(AttributeScopeNone, true, yyDollar[2].staticStr) + yyVAL.attributeField = NewScopedAttribute(AttributeScopeResource, false, yyDollar[2].staticStr) } case 197: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:429 { - yyVAL.attributeField = NewScopedAttribute(AttributeScopeResource, true, yyDollar[3].staticStr) + yyVAL.attributeField = NewScopedAttribute(AttributeScopeSpan, false, yyDollar[2].staticStr) } case 198: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:430 { - yyVAL.attributeField = NewScopedAttribute(AttributeScopeSpan, true, yyDollar[3].staticStr) + yyVAL.attributeField = NewScopedAttribute(AttributeScopeNone, true, yyDollar[2].staticStr) } case 199: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] //line pkg/traceql/expr.y:431 { - yyVAL.attributeField = NewScopedAttribute(AttributeScopeEvent, false, yyDollar[2].staticStr) + yyVAL.attributeField = NewScopedAttribute(AttributeScopeResource, true, yyDollar[3].staticStr) } case 200: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] //line 
pkg/traceql/expr.y:432 { - yyVAL.attributeField = NewScopedAttribute(AttributeScopeLink, false, yyDollar[2].staticStr) + yyVAL.attributeField = NewScopedAttribute(AttributeScopeSpan, true, yyDollar[3].staticStr) } case 201: yyDollar = yyS[yypt-3 : yypt+1] //line pkg/traceql/expr.y:433 + { + yyVAL.attributeField = NewScopedAttribute(AttributeScopeEvent, false, yyDollar[2].staticStr) + } + case 202: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:434 + { + yyVAL.attributeField = NewScopedAttribute(AttributeScopeLink, false, yyDollar[2].staticStr) + } + case 203: + yyDollar = yyS[yypt-3 : yypt+1] +//line pkg/traceql/expr.y:435 { yyVAL.attributeField = NewScopedAttribute(AttributeScopeInstrumentation, false, yyDollar[2].staticStr) } diff --git a/pkg/traceql/lexer.go b/pkg/traceql/lexer.go index 1960c9c9551..2306b34ca71 100644 --- a/pkg/traceql/lexer.go +++ b/pkg/traceql/lexer.go @@ -104,6 +104,7 @@ var tokens = map[string]int{ "count_over_time": COUNT_OVER_TIME, "min_over_time": MIN_OVER_TIME, "max_over_time": MAX_OVER_TIME, + "avg_over_time": AVG_OVER_TIME, "quantile_over_time": QUANTILE_OVER_TIME, "histogram_over_time": HISTOGRAM_OVER_TIME, "compare": COMPARE, diff --git a/pkg/traceql/parse_test.go b/pkg/traceql/parse_test.go index c23101858a0..fb208e588f5 100644 --- a/pkg/traceql/parse_test.go +++ b/pkg/traceql/parse_test.go @@ -1393,6 +1393,18 @@ func TestMetrics(t *testing.T) { }), ), }, + { + in: `{ } | avg_over_time(duration) by(name, span.http.status_code)`, + expected: newRootExprWithMetrics( + newPipeline(newSpansetFilter(NewStaticBool(true))), + newMetricsAggregateWithAttr(metricsAggregateAvgOverTime, + NewIntrinsic(IntrinsicDuration), + []Attribute{ + NewIntrinsic(IntrinsicName), + NewScopedAttribute(AttributeScopeSpan, false, "http.status_code"), + }), + ), + }, { in: `{ } | quantile_over_time(duration, 0, 0.90, 0.95, 1) by(name, span.http.status_code)`, expected: newRootExprWithMetrics( diff --git a/pkg/traceql/test_examples.yaml 
b/pkg/traceql/test_examples.yaml index 69c7da8dcf0..836fbaa3dbe 100644 --- a/pkg/traceql/test_examples.yaml +++ b/pkg/traceql/test_examples.yaml @@ -145,6 +145,8 @@ valid: - '{} | rate()' - '{} | count_over_time() by (name) with(sample=0.1)' - '{} | min_over_time(duration) by (span.http.path)' + - '{} | max_over_time(duration) by (span.http.path)' + - '{} | avg_over_time(duration) by (span.http.path)' - '{} | quantile_over_time(duration, 0, 0.9, 1) by (span.http.path)' # undocumented - nested set - '{ nestedSetLeft > 3 }' From cd722d9afb692906e5fbb6b66e35caea6d5e03f4 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 12 Sep 2024 16:49:21 +0200 Subject: [PATCH 02/29] fix test --- pkg/traceql/engine_metrics_test.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/traceql/engine_metrics_test.go b/pkg/traceql/engine_metrics_test.go index 7cc8d6fdc2c..b7fc33edc0a 100644 --- a/pkg/traceql/engine_metrics_test.go +++ b/pkg/traceql/engine_metrics_test.go @@ -649,11 +649,10 @@ func TestAvgOverTimeForDuration(t *testing.T) { // We cannot compare with require.Equal because NaN != NaN assert.True(t, math.IsNaN(fooBaz.Values[0])) assert.True(t, math.IsNaN(fooBaz.Values[1])) - // assert.Equal(t, 300/float64(time.Second), fooBaz.Values[2]) + assert.Equal(t, 200., fooBaz.Values[2]*float64(time.Second)) - // foo.bar = (0.000000128, 0.000000128, NaN) - // assert.Equal(t, 100/float64(time.Second), fooBar.Values[0]) - // assert.Equal(t, 400/float64(time.Second), fooBar.Values[1]) + assert.Equal(t, 100., fooBar.Values[0]*float64(time.Second)) + assert.Equal(t, 400., fooBar.Values[1]*float64(time.Second)) assert.True(t, math.IsNaN(fooBar.Values[2])) } From bbc96e6bd0d6a570d8bfbe988a8960fd5cd182ea Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 12 Sep 2024 16:54:13 +0200 Subject: [PATCH 03/29] wording --- pkg/traceql/engine_metrics.go | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git 
a/pkg/traceql/engine_metrics.go b/pkg/traceql/engine_metrics.go index 39e34fd33ec..3864f7c7383 100644 --- a/pkg/traceql/engine_metrics.go +++ b/pkg/traceql/engine_metrics.go @@ -1153,7 +1153,7 @@ type SimpleAggregator struct { ss SeriesSet exemplarBuckets *bucketSet len int - aggregationFunc func(b *SimpleAggregator, serie string, pos int, newValue float64) + aggregationFunc func(b *SimpleAggregator, promLabel string, pos int, newValue float64) start, end, step uint64 initWithNaN bool @@ -1166,46 +1166,46 @@ type SimpleAggregator struct { func NewSimpleCombiner(req *tempopb.QueryRangeRequest, op SimpleAggregationOp) *SimpleAggregator { l := IntervalCount(req.Start, req.End, req.Step) var initWithNaN bool - var f func(b *SimpleAggregator, serie string, pos int, newValue float64) + var f func(b *SimpleAggregator, promLabel string, pos int, newValue float64) switch op { case minAggregation: // Simple min aggregator. It calculates the minimum between existing values and a new sample - f = func(b *SimpleAggregator, serie string, pos int, newValue float64) { - existingValue := b.ss[serie].Values[pos] + f = func(b *SimpleAggregator, promLabel string, pos int, newValue float64) { + existingValue := b.ss[promLabel].Values[pos] if math.IsNaN(existingValue) || newValue < existingValue { - b.ss[serie].Values[pos] = newValue + b.ss[promLabel].Values[pos] = newValue } } initWithNaN = true case maxAggregation: // Simple max aggregator. It calculates the maximum between existing values and a new sample - f = func(b *SimpleAggregator, serie string, pos int, newValue float64) { - existingValue := b.ss[serie].Values[pos] + f = func(b *SimpleAggregator, promLabel string, pos int, newValue float64) { + existingValue := b.ss[promLabel].Values[pos] if math.IsNaN(existingValue) || newValue > existingValue { - b.ss[serie].Values[pos] = newValue + b.ss[promLabel].Values[pos] = newValue } } initWithNaN = true case avgAggregation: // Simple average aggregator. 
It calculates the average between existing values and a new sample - f = func(b *SimpleAggregator, serie string, pos int, inc float64) { - b.ssCounter[serie][pos]++ - mean := b.ss[serie].Values[pos] - count := b.ssCounter[serie][pos] - compensation := b.ssCompensation[serie][pos] + f = func(b *SimpleAggregator, promLabel string, pos int, inc float64) { + b.ssCounter[promLabel][pos]++ + mean := b.ss[promLabel].Values[pos] + count := b.ssCounter[promLabel][pos] + compensation := b.ssCompensation[promLabel][pos] mean, c := averageInc(mean, inc, count, compensation) - b.ssCompensation[serie][pos] = c - b.ss[serie].Values[pos] = mean + b.ssCompensation[promLabel][pos] = c + b.ss[promLabel].Values[pos] = mean } initWithNaN = true default: // Simple addition aggregator. It adds existing values with the new sample. - f = func(b *SimpleAggregator, serie string, pos int, newValue float64) { - b.ss[serie].Values[pos] += newValue + f = func(b *SimpleAggregator, promLabel string, pos int, newValue float64) { + b.ss[promLabel].Values[pos] += newValue } initWithNaN = false From 4b68942ac89d4a638c654b842cbbeaa875b90a05 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 12 Sep 2024 16:59:08 +0200 Subject: [PATCH 04/29] fix test --- pkg/traceql/engine_metrics_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/traceql/engine_metrics_test.go b/pkg/traceql/engine_metrics_test.go index b7fc33edc0a..7834229e817 100644 --- a/pkg/traceql/engine_metrics_test.go +++ b/pkg/traceql/engine_metrics_test.go @@ -652,7 +652,7 @@ func TestAvgOverTimeForDuration(t *testing.T) { assert.Equal(t, 200., fooBaz.Values[2]*float64(time.Second)) assert.Equal(t, 100., fooBar.Values[0]*float64(time.Second)) - assert.Equal(t, 400., fooBar.Values[1]*float64(time.Second)) + assert.Equal(t, 200., fooBar.Values[1]*float64(time.Second)) assert.True(t, math.IsNaN(fooBar.Values[2])) } From 67c0111eec77aa21e83cf5102510ef2dba27d779 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 
10 Oct 2024 17:10:58 +0200 Subject: [PATCH 05/29] new average engine --- pkg/traceql/ast.go | 7 - pkg/traceql/engine_metrics.go | 126 ++------ pkg/traceql/engine_metrics_average.go | 405 ++++++++++++++++++++++++++ pkg/traceql/engine_metrics_test.go | 101 ++++++- pkg/traceql/expr.y | 4 +- pkg/traceql/expr.y.go | 4 +- 6 files changed, 530 insertions(+), 117 deletions(-) create mode 100644 pkg/traceql/engine_metrics_average.go diff --git a/pkg/traceql/ast.go b/pkg/traceql/ast.go index 065c05ccdc7..67d6770ff14 100644 --- a/pkg/traceql/ast.go +++ b/pkg/traceql/ast.go @@ -1121,12 +1121,6 @@ func (a *MetricsAggregate) init(q *tempopb.QueryRangeRequest, mode AggregateMode exemplarFn = func(s Span) (float64, uint64) { return math.NaN(), a.spanStartTimeMs(s) } - case metricsAggregateAvgOverTime: - innerAgg = func() VectorAggregator { return NewOverTimeAggregator(a.attr, avgAggregation) } - a.simpleAggregationOp = avgAggregation - exemplarFn = func(s Span) (float64, uint64) { - return math.NaN(), a.spanStartTimeMs(s) - } case metricsAggregateRate: innerAgg = func() VectorAggregator { return NewRateAggregator(1.0 / time.Duration(q.Step).Seconds()) } a.simpleAggregationOp = sumAggregation @@ -1253,7 +1247,6 @@ func (a *MetricsAggregate) validate() error { case metricsAggregateCountOverTime: case metricsAggregateMinOverTime: case metricsAggregateMaxOverTime: - case metricsAggregateAvgOverTime: case metricsAggregateRate: case metricsAggregateHistogramOverTime: if len(a.by) >= maxGroupBys { diff --git a/pkg/traceql/engine_metrics.go b/pkg/traceql/engine_metrics.go index 3864f7c7383..7c711fe8fbf 100644 --- a/pkg/traceql/engine_metrics.go +++ b/pkg/traceql/engine_metrics.go @@ -345,39 +345,30 @@ func (c *CountOverTimeAggregator) Sample() float64 { // calculate the rate when given a multiplier. 
type OverTimeAggregator struct { getSpanAttValue func(s Span) float64 - agg func(current *OverTimeAggregator, new float64) + agg func(current, new float64) float64 val float64 - count float64 // number of spans - - // Only for computing the average - c float64 // compesation for Kahan summation } var _ VectorAggregator = (*OverTimeAggregator)(nil) func NewOverTimeAggregator(attr Attribute, op SimpleAggregationOp) *OverTimeAggregator { var fn func(s Span) float64 - var agg func(current *OverTimeAggregator, new float64) + var agg func(current, new float64) float64 switch op { case maxAggregation: - agg = func(current *OverTimeAggregator, new float64) { - if math.IsNaN(current.val) || new > current.val { - current.val = new + agg = func(current, new float64) float64 { + if math.IsNaN(current) || new > current { + return new } + return current } case minAggregation: - agg = func(current *OverTimeAggregator, new float64) { - if math.IsNaN(current.val) || new < current.val { - current.val = new + agg = func(current, new float64) float64 { + if math.IsNaN(current) || new < current { + return new } - } - case avgAggregation: - agg = func(current *OverTimeAggregator, inc float64) { - current.count++ - mean, c := averageInc(current.val, inc, current.count, current.c) - current.c = c - current.val = mean + return current } } @@ -404,54 +395,13 @@ func NewOverTimeAggregator(attr Attribute, op SimpleAggregationOp) *OverTimeAggr } func (c *OverTimeAggregator) Observe(s Span) { - c.agg(c, c.getSpanAttValue(s)) + c.val = c.agg(c.val, c.getSpanAttValue(s)) } func (c *OverTimeAggregator) Sample() float64 { return c.val } -func averageInc(mean, inc, count, compensation float64) (float64, float64) { - if math.IsNaN(mean) && !math.IsNaN(inc) { - // When we have a proper value in the span we need to initialize to 0 - mean = 0 - } - if math.IsInf(mean, 0) { - if math.IsInf(inc, 0) && (mean > 0) == (inc > 0) { - // The `current.val` and `new` values are `Inf` of the same sign. 
They - // can't be subtracted, but the value of `current.val` is correct - // already. - return mean, compensation - } - if !math.IsInf(inc, 0) && !math.IsNaN(inc) { - // At this stage, the current.val is an infinite. If the added - // value is neither an Inf or a Nan, we can keep that mean - // value. - // This is required because our calculation below removes - // the mean value, which would look like Inf += x - Inf and - // end up as a NaN. - return mean, compensation - } - } - mean, c := kahanSumInc(inc/count-mean/count, mean, compensation) - return mean, c -} - -func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { - t := sum + inc - switch { - case math.IsInf(t, 0): - c = 0 - - // Using Neumaier improvement, swap if next term larger than sum. - case math.Abs(sum) >= math.Abs(inc): - c += (sum - t) + inc - default: - c += (inc - t) + sum - } - return t, c -} - // StepAggregator sorts spans into time slots using a step interval like 30s or 1m type StepAggregator struct { start, end, step uint64 @@ -1146,67 +1096,43 @@ const ( sumAggregation SimpleAggregationOp = iota minAggregation maxAggregation - avgAggregation ) type SimpleAggregator struct { ss SeriesSet exemplarBuckets *bucketSet len int - aggregationFunc func(b *SimpleAggregator, promLabel string, pos int, newValue float64) + aggregationFunc func(existingValue float64, newValue float64) float64 start, end, step uint64 initWithNaN bool - - // Only for average - initAvg bool - ssCounter map[string]map[int]float64 // Counter of processed elements to calculate median - ssCompensation map[string]map[int]float64 // Avg compensation } func NewSimpleCombiner(req *tempopb.QueryRangeRequest, op SimpleAggregationOp) *SimpleAggregator { l := IntervalCount(req.Start, req.End, req.Step) var initWithNaN bool - var f func(b *SimpleAggregator, promLabel string, pos int, newValue float64) + var f func(existingValue float64, newValue float64) float64 switch op { case minAggregation: // Simple min aggregator. 
It calculates the minimum between existing values and a new sample - f = func(b *SimpleAggregator, promLabel string, pos int, newValue float64) { - existingValue := b.ss[promLabel].Values[pos] + f = func(existingValue float64, newValue float64) float64 { if math.IsNaN(existingValue) || newValue < existingValue { - b.ss[promLabel].Values[pos] = newValue + return newValue } + return existingValue } initWithNaN = true case maxAggregation: // Simple max aggregator. It calculates the maximum between existing values and a new sample - f = func(b *SimpleAggregator, promLabel string, pos int, newValue float64) { - existingValue := b.ss[promLabel].Values[pos] + f = func(existingValue float64, newValue float64) float64 { if math.IsNaN(existingValue) || newValue > existingValue { - b.ss[promLabel].Values[pos] = newValue + return newValue } + return existingValue } initWithNaN = true - - case avgAggregation: - // Simple average aggregator. It calculates the average between existing values and a new sample - f = func(b *SimpleAggregator, promLabel string, pos int, inc float64) { - b.ssCounter[promLabel][pos]++ - mean := b.ss[promLabel].Values[pos] - count := b.ssCounter[promLabel][pos] - compensation := b.ssCompensation[promLabel][pos] - - mean, c := averageInc(mean, inc, count, compensation) - - b.ssCompensation[promLabel][pos] = c - b.ss[promLabel].Values[pos] = mean - } - initWithNaN = true - default: // Simple addition aggregator. It adds existing values with the new sample. 
- f = func(b *SimpleAggregator, promLabel string, pos int, newValue float64) { - b.ss[promLabel].Values[pos] += newValue - } + f = func(existingValue float64, newValue float64) float64 { return existingValue + newValue } initWithNaN = false } @@ -1219,7 +1145,6 @@ func NewSimpleCombiner(req *tempopb.QueryRangeRequest, op SimpleAggregationOp) * step: req.Step, aggregationFunc: f, initWithNaN: initWithNaN, - initAvg: (op == avgAggregation), } } @@ -1250,21 +1175,12 @@ func (b *SimpleAggregator) Combine(in []*tempopb.TimeSeries) { } b.ss[ts.PromLabels] = existing - if b.initAvg { - if b.ssCounter == nil { - b.ssCounter = map[string]map[int]float64{} - b.ssCompensation = map[string]map[int]float64{} - } - - b.ssCounter[ts.PromLabels] = make(map[int]float64, b.len) - b.ssCompensation[ts.PromLabels] = make(map[int]float64, b.len) - } } for _, sample := range ts.Samples { j := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) if j >= 0 && j < len(existing.Values) { - b.aggregationFunc(b, ts.PromLabels, j, sample.Value) + existing.Values[j] = b.aggregationFunc(existing.Values[j], sample.Value) } } diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go new file mode 100644 index 00000000000..b170e993a20 --- /dev/null +++ b/pkg/traceql/engine_metrics_average.go @@ -0,0 +1,405 @@ +package traceql + +import ( + "fmt" + "math" + "strings" + "time" + + "github.com/grafana/tempo/pkg/tempopb" + v1 "github.com/grafana/tempo/pkg/tempopb/common/v1" + "github.com/prometheus/prometheus/model/labels" +) + +type AvgSeries[S StaticVals] struct { + avg []float64 + count []float64 + compensation []float64 + init bool + vals S +} + +type AvgAggregator[F FastStatic, S StaticVals] struct { + // Config + by []Attribute // Original attributes: .foo + byLookups [][]Attribute // Lookups: span.foo resource.foo + getSpanAttValue func(s Span) float64 + start uint64 + end uint64 + step uint64 + + // Data + series map[F]AvgSeries[S] + lastSeries AvgSeries[S] + 
buf fastStaticWithValues[F, S] + lastBuf fastStaticWithValues[F, S] +} + +type SimpleAverageSeriesAggregator struct { + ss SeriesSet + countProm map[string]string + len int + start, end, step uint64 +} + +var ( + _ SpanAggregator = (*AvgAggregator[FastStatic1, StaticVals1])(nil) + _ metricsFirstStageElement = (*MetricsAverageAggregate)(nil) + _ SeriesAggregator = (*SimpleAverageSeriesAggregator)(nil) +) + +func (b *SimpleAverageSeriesAggregator) Combine(in []*tempopb.TimeSeries) { + newCountersTS := make(map[string][]float64) + nan := math.Float64frombits(normalNaN) + + for _, ts := range in { + counterPromLabel := "" + if strings.Contains(ts.PromLabels, "_type") { + counterPromLabel = getLabels(ts.Labels, "_type").String() + newCountersTS[counterPromLabel] = make([]float64, b.len) + for i, sample := range ts.Samples { + newCountersTS[counterPromLabel][i] = sample.Value + } + } + _, ok := b.ss[ts.PromLabels] + if !ok { + labels := getLabels(ts.Labels, "") + n := TimeSeries{ + Labels: labels, + Values: make([]float64, b.len), + Exemplars: make([]Exemplar, 0, len(ts.Exemplars)), + } + if counterPromLabel != "" { + b.countProm[counterPromLabel] = ts.PromLabels + } else { + for i := range n.Values { + n.Values[i] = nan + } + } + b.ss[ts.PromLabels] = n + } + } + for _, ts := range in { + counterLabel, ok := b.countProm[ts.PromLabels] + if !ok { + // This is a counter label, we can skip it + continue + } + for _, sample := range ts.Samples { + pos := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) + if pos < 0 || pos > len(b.ss[ts.PromLabels].Values) { + continue + } + + currentAvg := b.ss[ts.PromLabels].Values[pos] + newAvg := sample.Value + currentCount := b.ss[counterLabel].Values[pos] + newCount := newCountersTS[ts.PromLabels][pos] + + if math.IsNaN(currentAvg) && !math.IsNaN(newAvg) { + b.ss[ts.PromLabels].Values[pos] = newAvg + b.ss[counterLabel].Values[pos] = newCount + } else if !math.IsNaN(newAvg) { + avg := (currentAvg*currentCount + newAvg*newCount) 
/ (currentCount + newCount) + b.ss[ts.PromLabels].Values[pos] = avg + b.ss[counterLabel].Values[pos] = currentCount + newCount + } + } + } +} + +func getLabels(vals []v1.KeyValue, skipKey string) Labels { + labels := make(Labels, 0, len(vals)) + for _, l := range vals { + if skipKey != "" && l.Key == skipKey { + continue + } + labels = append(labels, Label{ + Name: l.Key, + Value: StaticFromAnyValue(l.Value), + }) + } + return labels +} + +func (b *SimpleAverageSeriesAggregator) Results() SeriesSet { + return b.ss +} + +func (g *AvgAggregator[F, S]) Observe(span Span) { + if !g.getGroupingValues(span) { + return + } + + s := g.getSeries() + interval := IntervalOf(span.StartTimeUnixNanos(), g.start, g.end, g.step) + if interval == -1 { + return + } + inc := g.getSpanAttValue(span) + if math.IsNaN(inc) { + return + } + s.count[interval]++ + mean, c := averageInc(s.avg[interval], inc, s.count[interval], s.compensation[interval]) + s.avg[interval] = mean + s.compensation[interval] = c +} + +func (g *AvgAggregator[F, S]) ObserveExemplar(_ Span, _ float64, _ uint64) { +} + +func (g *AvgAggregator[F, S]) labelsFor(vals S, t string) (Labels, string) { + if g.by == nil { + serieLabel := make(Labels, 2) + serieLabel[0] = Label{labels.MetricName, NewStaticString(metricsAggregateAvgOverTime.String())} + if t != "" { + serieLabel[1] = Label{"_type", NewStaticString(t)} + } + return serieLabel, serieLabel.String() + } + labels := make(Labels, 0, len(g.by)+1) + for i := range g.by { + if vals[i].Type == TypeNil { + continue + } + labels = append(labels, Label{g.by[i].String(), vals[i]}) + } + + if len(labels) == 0 { + // When all nil then force one + labels = append(labels, Label{g.by[0].String(), NewStaticNil()}) + } + + if t != "" { + labels = append(labels, Label{"_type", NewStaticString(t)}) + } + + return labels, labels.String() +} + +func (g *AvgAggregator[F, S]) Series() SeriesSet { + ss := SeriesSet{} + + for _, s := range g.series { + labels, promLabelsAvg := 
g.labelsFor(s.vals, "") + + ss[promLabelsAvg] = TimeSeries{ + Labels: labels, + Values: s.avg, + Exemplars: []Exemplar{}, + } + + labels, promLabelsCount := g.labelsFor(s.vals, "count") + ss[promLabelsCount] = TimeSeries{ + Labels: labels, + Values: s.count, + Exemplars: []Exemplar{}, + } + } + + return ss +} + +func (g *AvgAggregator[F, S]) getGroupingValues(span Span) bool { + for i, lookups := range g.byLookups { + val := lookup(lookups, span) + g.buf.vals[i] = val + g.buf.fast[i] = val.MapKey() + } + return true +} + +// getSeries gets the series for the current span. +// It will reuse the last series if possible. +func (g *AvgAggregator[F, S]) getSeries() AvgSeries[S] { + // Fast path + if g.lastSeries.init && g.lastBuf.fast == g.buf.fast { + return g.lastSeries + } + + s, ok := g.series[g.buf.fast] + if !ok { + intervals := IntervalCount(g.start, g.end, g.step) + s = AvgSeries[S]{ + init: true, + vals: g.buf.vals, + count: make([]float64, intervals), + avg: make([]float64, intervals), + compensation: make([]float64, intervals), + } + for i := 0; i < intervals; i++ { + s.avg[i] = math.Float64frombits(normalNaN) + } + + g.series[g.buf.fast] = s + } + + g.lastBuf = g.buf + g.lastSeries = s + return s +} + +func NewAvgAggregator(attr Attribute, by []Attribute, start, end, step uint64) SpanAggregator { + lookups := make([][]Attribute, len(by)) + for i, attr := range by { + if attr.Intrinsic == IntrinsicNone && attr.Scope == AttributeScopeNone { + // Unscoped attribute. Check span-level, then resource-level. + // TODO - Is this taken care of by span.AttributeFor now? 
+ lookups[i] = []Attribute{ + NewScopedAttribute(AttributeScopeSpan, false, attr.Name), + NewScopedAttribute(AttributeScopeResource, false, attr.Name), + } + } else { + lookups[i] = []Attribute{attr} + } + } + + aggNum := len(lookups) + + switch aggNum { + case 2: + return newAvgAggregator[FastStatic2, StaticVals2](attr, by, lookups, start, end, step) + case 3: + return newAvgAggregator[FastStatic3, StaticVals3](attr, by, lookups, start, end, step) + case 4: + return newAvgAggregator[FastStatic4, StaticVals4](attr, by, lookups, start, end, step) + case 5: + return newAvgAggregator[FastStatic5, StaticVals5](attr, by, lookups, start, end, step) + default: + return newAvgAggregator[FastStatic1, StaticVals1](attr, by, lookups, start, end, step) + } +} + +func newAvgAggregator[F FastStatic, S StaticVals](attr Attribute, by []Attribute, lookups [][]Attribute, start, end, step uint64) SpanAggregator { + var fn func(s Span) float64 + + switch attr { + case IntrinsicDurationAttribute: + fn = func(s Span) float64 { + return float64(s.DurationNanos()) / float64(time.Second) + } + default: + fn = func(s Span) float64 { + f, a := FloatizeAttribute(s, attr) + if a == TypeNil { + return math.Float64frombits(normalNaN) + } + return f + } + } + + return &AvgAggregator[F, S]{ + series: map[F]AvgSeries[S]{}, + getSpanAttValue: fn, + by: by, + byLookups: lookups, + start: start, + end: end, + step: step, + } +} + +type MetricsAverageAggregate struct { + by []Attribute + attr Attribute + agg SpanAggregator + seriesAgg SeriesAggregator + exemplarFn getExemplar + mode AggregateMode +} + +var _ metricsFirstStageElement = (*MetricsAverageAggregate)(nil) + +func newMetricsAverageAggregateWithAttr(attr Attribute, by []Attribute) *MetricsAverageAggregate { + return &MetricsAverageAggregate{ + attr: attr, + by: by, + } +} + +func (a *MetricsAverageAggregate) init(q *tempopb.QueryRangeRequest, mode AggregateMode) { + exemplarFn := func(s Span) (float64, uint64) { + return math.NaN(), 
a.spanStartTimeMs(s) + } + + a.seriesAgg = &SimpleAverageSeriesAggregator{ + ss: make(SeriesSet), + countProm: make(map[string]string), + len: IntervalCount(q.Start, q.End, q.Step), + start: q.Start, + end: q.End, + step: q.Step, + } + + if mode == AggregateModeRaw { + a.agg = NewAvgAggregator(a.attr, a.by, q.Start, q.End, q.Step) + } + + a.exemplarFn = exemplarFn + a.mode = mode +} + +func (a *MetricsAverageAggregate) observe(span Span) { + a.agg.Observe(span) +} + +func (a *MetricsAverageAggregate) observeExemplar(span Span) { + v, ts := a.exemplarFn(span) + a.agg.ObserveExemplar(span, v, ts) +} + +func (a *MetricsAverageAggregate) observeSeries(ss []*tempopb.TimeSeries) { + a.seriesAgg.Combine(ss) +} + +func (a *MetricsAverageAggregate) result() SeriesSet { + if a.agg != nil { + return a.agg.Series() + } + + // In the frontend-version the results come from + // the job-level aggregator + ss := a.seriesAgg.Results() + if a.mode == AggregateModeFinal { + for i := range ss { + if strings.Contains(i, "_type") { + delete(ss, i) + } + } + } + return ss +} + +func (a *MetricsAverageAggregate) extractConditions(request *FetchSpansRequest) { + // For metrics aggregators based on a span attribute we have to include it + includeAttribute := a.attr != (Attribute{}) && !request.HasAttribute(a.attr) + if includeAttribute { + request.SecondPassConditions = append(request.SecondPassConditions, Condition{ + Attribute: a.attr, + }) + } + + for _, b := range a.by { + if !request.HasAttribute(b) { + request.SecondPassConditions = append(request.SecondPassConditions, Condition{ + Attribute: b, + }) + } + } +} + +func (a *MetricsAverageAggregate) validate() error { + if len(a.by) >= maxGroupBys { + return newUnsupportedError(fmt.Sprintf("metrics group by %v values", len(a.by))) + } + return nil +} + +func (a *MetricsAverageAggregate) spanStartTimeMs(s Span) uint64 { + return s.StartTimeUnixNanos() / uint64(time.Millisecond) +} + +func (a *MetricsAverageAggregate) String() string { + 
return "avg(" + a.attr.String() + ")" +} diff --git a/pkg/traceql/engine_metrics_test.go b/pkg/traceql/engine_metrics_test.go index 7834229e817..eb8fae70bab 100644 --- a/pkg/traceql/engine_metrics_test.go +++ b/pkg/traceql/engine_metrics_test.go @@ -656,6 +656,45 @@ func TestAvgOverTimeForDuration(t *testing.T) { assert.True(t, math.IsNaN(fooBar.Values[2])) } +func TestAvgOverTimeForDuration2(t *testing.T) { + req := &tempopb.QueryRangeRequest{ + Start: uint64(1 * time.Second), + End: uint64(3 * time.Second), + Step: uint64(1 * time.Second), + Query: "{ } | avg_over_time(duration)", + } + + // A variety of spans across times, durations, and series. All durations are powers of 2 for simplicity + in := []Span{ + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithDuration(500), + + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithDuration(100), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithDuration(200), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithDuration(300), + } + + result := runTraceQLMetric(t, req, in) + + fooBaz := result[`{span.foo="baz"}`] + fooBar := result[`{span.foo="bar"}`] + + // We cannot compare with require.Equal because NaN != NaN + assert.True(t, math.IsNaN(fooBaz.Values[0])) 
+ assert.True(t, math.IsNaN(fooBaz.Values[1])) + assert.Equal(t, 200., fooBaz.Values[2]*float64(time.Second)) + + assert.Equal(t, 100., fooBar.Values[0]*float64(time.Second)) + assert.Equal(t, 200., fooBar.Values[1]*float64(time.Second)) + assert.True(t, math.IsNaN(fooBar.Values[2])) +} + func TestAvgOverTimeForSpanAttribute(t *testing.T) { req := &tempopb.QueryRangeRequest{ Start: uint64(1 * time.Second), @@ -971,6 +1010,67 @@ func TestHistogramOverTime(t *testing.T) { require.Equal(t, out, result) } +func TestObserveSeriesAverageOverTimeForSpanAttribute(t *testing.T) { + req := &tempopb.QueryRangeRequest{ + Start: uint64(1 * time.Second), + End: uint64(3 * time.Second), + Step: uint64(1 * time.Second), + Query: "{ } | avg_over_time(span.http.status_code) by (span.foo)", + } + + // A variety of spans across times, durations, and series. All durations are powers of 2 for simplicity + in := []Span{ + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 300), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 400), + + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), + + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 201), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", 
"baz").WithSpanInt("http.status_code", 401), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 500), + } + + in2 := []Span{ + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 300), + + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 400), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), + + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 400), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), + } + + e := NewEngine() + layer1A, _ := e.CompileMetricsQueryRange(req, 0, 0, false) + layer1B, _ := e.CompileMetricsQueryRange(req, 0, 0, false) + layer2A, _ := e.CompileMetricsQueryRangeNonRaw(req, AggregateModeSum) + layer2B, _ := e.CompileMetricsQueryRangeNonRaw(req, AggregateModeSum) + layer3, _ := e.CompileMetricsQueryRangeNonRaw(req, AggregateModeFinal) + + for _, s := range in { + layer1A.metricsPipeline.observe(s) + } + + layer2A.ObserveSeries(layer1A.Results().ToProto(req)) + + for _, s := range in2 { + layer1B.metricsPipeline.observe(s) + } + + layer2B.ObserveSeries(layer1B.Results().ToProto(req)) + + layer3.ObserveSeries(layer2A.Results().ToProto(req)) + layer3.ObserveSeries(layer2B.Results().ToProto(req)) + + res := layer3.Results().ToProto(req) + assert.NotNil(t, res) +} + func runTraceQLMetric(t *testing.T, req *tempopb.QueryRangeRequest, inSpans ...[]Span) SeriesSet { e := NewEngine() @@ -996,7 +1096,6 @@ func runTraceQLMetric(t *testing.T, req *tempopb.QueryRangeRequest, inSpans ...[ // These are summed counts over time by bucket 
res := layer2.Results() layer3.ObserveSeries(res.ToProto(req)) - // Layer 3 final results return layer3.Results() diff --git a/pkg/traceql/expr.y b/pkg/traceql/expr.y index 69faadaa8bb..efdb4a06558 100644 --- a/pkg/traceql/expr.y +++ b/pkg/traceql/expr.y @@ -302,8 +302,8 @@ metricsAggregation: | MIN_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateMinOverTime, $3, $7) } | MAX_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateMaxOverTime, $3, nil) } | MAX_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateMaxOverTime, $3, $7) } - | AVG_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateAvgOverTime, $3, nil) } - | AVG_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateAvgOverTime, $3, $7) } + | AVG_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = newMetricsAverageAggregateWithAttr($3, nil) } + | AVG_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAverageAggregateWithAttr($3, $7) } | QUANTILE_OVER_TIME OPEN_PARENS attribute COMMA numericList CLOSE_PARENS { $$ = newMetricsAggregateQuantileOverTime($3, $5, nil) } | QUANTILE_OVER_TIME OPEN_PARENS attribute COMMA numericList CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateQuantileOverTime($3, $5, $9) } | HISTOGRAM_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateHistogramOverTime, $3, nil) } diff --git a/pkg/traceql/expr.y.go b/pkg/traceql/expr.y.go index 2846952a876..915f9197de2 100644 --- a/pkg/traceql/expr.y.go +++ b/pkg/traceql/expr.y.go @@ -1613,13 +1613,13 @@ yydefault: yyDollar = yyS[yypt-4 : yypt+1] //line pkg/traceql/expr.y:305 { 
- yyVAL.metricsAggregation = newMetricsAggregateWithAttr(metricsAggregateAvgOverTime, yyDollar[3].attribute, nil) + yyVAL.metricsAggregation = newMetricsAverageAggregateWithAttr(yyDollar[3].attribute, nil) } case 115: yyDollar = yyS[yypt-8 : yypt+1] //line pkg/traceql/expr.y:306 { - yyVAL.metricsAggregation = newMetricsAggregateWithAttr(metricsAggregateAvgOverTime, yyDollar[3].attribute, yyDollar[7].attributeList) + yyVAL.metricsAggregation = newMetricsAverageAggregateWithAttr(yyDollar[3].attribute, yyDollar[7].attributeList) } case 116: yyDollar = yyS[yypt-6 : yypt+1] From 18583d7f70a2190b0a426b02f17c8eddd399a237 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 10 Oct 2024 17:12:53 +0200 Subject: [PATCH 06/29] added missed method --- pkg/traceql/engine_metrics_average.go | 41 +++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index b170e993a20..6b62df6953b 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -146,6 +146,47 @@ func (g *AvgAggregator[F, S]) Observe(span Span) { s.compensation[interval] = c } +func averageInc(mean, inc, count, compensation float64) (float64, float64) { + if math.IsNaN(mean) && !math.IsNaN(inc) { + // When we have a proper value in the span we need to initialize to 0 + mean = 0 + } + if math.IsInf(mean, 0) { + if math.IsInf(inc, 0) && (mean > 0) == (inc > 0) { + // The `current.val` and `new` values are `Inf` of the same sign. They + // can't be subtracted, but the value of `current.val` is correct + // already. + return mean, compensation + } + if !math.IsInf(inc, 0) && !math.IsNaN(inc) { + // At this stage, the current.val is an infinite. If the added + // value is neither an Inf or a Nan, we can keep that mean + // value. + // This is required because our calculation below removes + // the mean value, which would look like Inf += x - Inf and + // end up as a NaN. 
+ return mean, compensation + } + } + mean, c := kahanSumInc(inc/count-mean/count, mean, compensation) + return mean, c +} + +func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { + t := sum + inc + switch { + case math.IsInf(t, 0): + c = 0 + + // Using Neumaier improvement, swap if next term larger than sum. + case math.Abs(sum) >= math.Abs(inc): + c += (sum - t) + inc + default: + c += (inc - t) + sum + } + return t, c +} + func (g *AvgAggregator[F, S]) ObserveExemplar(_ Span, _ float64, _ uint64) { } From 700f21b51e49fe1141595154fc06798d36e95638 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 10 Oct 2024 17:46:14 +0200 Subject: [PATCH 07/29] fix tests --- pkg/traceql/engine_metrics_average.go | 408 +++++++++++++------------- pkg/traceql/engine_metrics_test.go | 155 +++++----- pkg/traceql/expr.y | 4 +- pkg/traceql/expr.y.go | 4 +- 4 files changed, 291 insertions(+), 280 deletions(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index 6b62df6953b..b35a8225b00 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -11,44 +11,123 @@ import ( "github.com/prometheus/prometheus/model/labels" ) -type AvgSeries[S StaticVals] struct { - avg []float64 - count []float64 - compensation []float64 - init bool - vals S +// Average over time aggregator +type AverageOverTimeAggregator struct { + by []Attribute + attr Attribute + // Average over time span aggregator + agg SpanAggregator + // Average over time series aggregator + seriesAgg SeriesAggregator + exemplarFn getExemplar + mode AggregateMode } -type AvgAggregator[F FastStatic, S StaticVals] struct { - // Config - by []Attribute // Original attributes: .foo - byLookups [][]Attribute // Lookups: span.foo resource.foo - getSpanAttValue func(s Span) float64 - start uint64 - end uint64 - step uint64 +var _ metricsFirstStageElement = (*AverageOverTimeAggregator)(nil) - // Data - series map[F]AvgSeries[S] - lastSeries 
AvgSeries[S] - buf fastStaticWithValues[F, S] - lastBuf fastStaticWithValues[F, S] +func newAverageOverTimeMetricsAggregator(attr Attribute, by []Attribute) *AverageOverTimeAggregator { + return &AverageOverTimeAggregator{ + attr: attr, + by: by, + } +} + +func (a *AverageOverTimeAggregator) init(q *tempopb.QueryRangeRequest, mode AggregateMode) { + exemplarFn := func(s Span) (float64, uint64) { + return math.NaN(), a.spanStartTimeMs(s) + } + + a.seriesAgg = &SpanSetsAverageOverTimeAggregator{ + ss: make(SeriesSet), + countProm: make(map[string]string), + len: IntervalCount(q.Start, q.End, q.Step), + start: q.Start, + end: q.End, + step: q.Step, + } + + if mode == AggregateModeRaw { + a.agg = NewAvgOverTimeSpanAggregator(a.attr, a.by, q.Start, q.End, q.Step) + } + + a.exemplarFn = exemplarFn + a.mode = mode +} + +func (a *AverageOverTimeAggregator) observe(span Span) { + a.agg.Observe(span) +} + +func (a *AverageOverTimeAggregator) observeExemplar(span Span) { + v, ts := a.exemplarFn(span) + a.agg.ObserveExemplar(span, v, ts) +} + +func (a *AverageOverTimeAggregator) observeSeries(ss []*tempopb.TimeSeries) { + a.seriesAgg.Combine(ss) +} + +func (a *AverageOverTimeAggregator) result() SeriesSet { + if a.agg != nil { + return a.agg.Series() + } + + // In the frontend-version the results come from + // the job-level aggregator + ss := a.seriesAgg.Results() + if a.mode == AggregateModeFinal { + for i := range ss { + if strings.Contains(i, "_type") { + delete(ss, i) + } + } + } + return ss +} + +func (a *AverageOverTimeAggregator) extractConditions(request *FetchSpansRequest) { + // For metrics aggregators based on a span attribute we have to include it + includeAttribute := a.attr != (Attribute{}) && !request.HasAttribute(a.attr) + if includeAttribute { + request.SecondPassConditions = append(request.SecondPassConditions, Condition{ + Attribute: a.attr, + }) + } + + for _, b := range a.by { + if !request.HasAttribute(b) { + request.SecondPassConditions = 
append(request.SecondPassConditions, Condition{ + Attribute: b, + }) + } + } +} + +func (a *AverageOverTimeAggregator) validate() error { + if len(a.by) >= maxGroupBys { + return newUnsupportedError(fmt.Sprintf("metrics group by %v values", len(a.by))) + } + return nil } -type SimpleAverageSeriesAggregator struct { +func (a *AverageOverTimeAggregator) spanStartTimeMs(s Span) uint64 { + return s.StartTimeUnixNanos() / uint64(time.Millisecond) +} + +func (a *AverageOverTimeAggregator) String() string { + return "avg(" + a.attr.String() + ")" +} + +type SpanSetsAverageOverTimeAggregator struct { ss SeriesSet countProm map[string]string len int start, end, step uint64 } -var ( - _ SpanAggregator = (*AvgAggregator[FastStatic1, StaticVals1])(nil) - _ metricsFirstStageElement = (*MetricsAverageAggregate)(nil) - _ SeriesAggregator = (*SimpleAverageSeriesAggregator)(nil) -) +var _ SeriesAggregator = (*SpanSetsAverageOverTimeAggregator)(nil) -func (b *SimpleAverageSeriesAggregator) Combine(in []*tempopb.TimeSeries) { +func (b *SpanSetsAverageOverTimeAggregator) Combine(in []*tempopb.TimeSeries) { newCountersTS := make(map[string][]float64) nan := math.Float64frombits(normalNaN) @@ -122,11 +201,100 @@ func getLabels(vals []v1.KeyValue, skipKey string) Labels { return labels } -func (b *SimpleAverageSeriesAggregator) Results() SeriesSet { +func (b *SpanSetsAverageOverTimeAggregator) Results() SeriesSet { return b.ss } -func (g *AvgAggregator[F, S]) Observe(span Span) { +// Accumulated results of average over time +type AvgOverTimeSeries[S StaticVals] struct { + avg []float64 + count []float64 + compensation []float64 + init bool + vals S +} + +// In charge of calculating the average over time for a set of spans +// First aggregation layer +type AvgOverTimeSpanAggregator[F FastStatic, S StaticVals] struct { + // Config + by []Attribute // Original attributes: .foo + byLookups [][]Attribute // Lookups: span.foo resource.foo + getSpanAttValue func(s Span) float64 + start uint64 + 
end uint64 + step uint64 + + // Data + series map[F]AvgOverTimeSeries[S] + lastSeries AvgOverTimeSeries[S] + buf fastStaticWithValues[F, S] + lastBuf fastStaticWithValues[F, S] +} + +var _ SpanAggregator = (*AvgOverTimeSpanAggregator[FastStatic1, StaticVals1])(nil) + +func NewAvgOverTimeSpanAggregator(attr Attribute, by []Attribute, start, end, step uint64) SpanAggregator { + lookups := make([][]Attribute, len(by)) + for i, attr := range by { + if attr.Intrinsic == IntrinsicNone && attr.Scope == AttributeScopeNone { + // Unscoped attribute. Check span-level, then resource-level. + // TODO - Is this taken care of by span.AttributeFor now? + lookups[i] = []Attribute{ + NewScopedAttribute(AttributeScopeSpan, false, attr.Name), + NewScopedAttribute(AttributeScopeResource, false, attr.Name), + } + } else { + lookups[i] = []Attribute{attr} + } + } + + aggNum := len(lookups) + + switch aggNum { + case 2: + return newAvgAggregator[FastStatic2, StaticVals2](attr, by, lookups, start, end, step) + case 3: + return newAvgAggregator[FastStatic3, StaticVals3](attr, by, lookups, start, end, step) + case 4: + return newAvgAggregator[FastStatic4, StaticVals4](attr, by, lookups, start, end, step) + case 5: + return newAvgAggregator[FastStatic5, StaticVals5](attr, by, lookups, start, end, step) + default: + return newAvgAggregator[FastStatic1, StaticVals1](attr, by, lookups, start, end, step) + } +} + +func newAvgAggregator[F FastStatic, S StaticVals](attr Attribute, by []Attribute, lookups [][]Attribute, start, end, step uint64) SpanAggregator { + var fn func(s Span) float64 + + switch attr { + case IntrinsicDurationAttribute: + fn = func(s Span) float64 { + return float64(s.DurationNanos()) / float64(time.Second) + } + default: + fn = func(s Span) float64 { + f, a := FloatizeAttribute(s, attr) + if a == TypeNil { + return math.Float64frombits(normalNaN) + } + return f + } + } + + return &AvgOverTimeSpanAggregator[F, S]{ + series: map[F]AvgOverTimeSeries[S]{}, + getSpanAttValue: fn, 
+ by: by, + byLookups: lookups, + start: start, + end: end, + step: step, + } +} + +func (g *AvgOverTimeSpanAggregator[F, S]) Observe(span Span) { if !g.getGroupingValues(span) { return } @@ -187,15 +355,15 @@ func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { return t, c } -func (g *AvgAggregator[F, S]) ObserveExemplar(_ Span, _ float64, _ uint64) { +func (g *AvgOverTimeSpanAggregator[F, S]) ObserveExemplar(_ Span, _ float64, _ uint64) { } -func (g *AvgAggregator[F, S]) labelsFor(vals S, t string) (Labels, string) { +func (g *AvgOverTimeSpanAggregator[F, S]) labelsFor(vals S, t string) (Labels, string) { if g.by == nil { - serieLabel := make(Labels, 2) + serieLabel := make(Labels, 1, 2) serieLabel[0] = Label{labels.MetricName, NewStaticString(metricsAggregateAvgOverTime.String())} if t != "" { - serieLabel[1] = Label{"_type", NewStaticString(t)} + serieLabel = append(serieLabel, Label{"_type", NewStaticString(t)}) } return serieLabel, serieLabel.String() } @@ -219,18 +387,18 @@ func (g *AvgAggregator[F, S]) labelsFor(vals S, t string) (Labels, string) { return labels, labels.String() } -func (g *AvgAggregator[F, S]) Series() SeriesSet { +func (g *AvgOverTimeSpanAggregator[F, S]) Series() SeriesSet { ss := SeriesSet{} for _, s := range g.series { + // First, get the regular series labels, promLabelsAvg := g.labelsFor(s.vals, "") - ss[promLabelsAvg] = TimeSeries{ Labels: labels, Values: s.avg, Exemplars: []Exemplar{}, } - + // Second, get the "count" series labels, promLabelsCount := g.labelsFor(s.vals, "count") ss[promLabelsCount] = TimeSeries{ Labels: labels, @@ -242,7 +410,7 @@ func (g *AvgAggregator[F, S]) Series() SeriesSet { return ss } -func (g *AvgAggregator[F, S]) getGroupingValues(span Span) bool { +func (g *AvgOverTimeSpanAggregator[F, S]) getGroupingValues(span Span) bool { for i, lookups := range g.byLookups { val := lookup(lookups, span) g.buf.vals[i] = val @@ -253,7 +421,7 @@ func (g *AvgAggregator[F, S]) getGroupingValues(span Span) bool 
{ // getSeries gets the series for the current span. // It will reuse the last series if possible. -func (g *AvgAggregator[F, S]) getSeries() AvgSeries[S] { +func (g *AvgOverTimeSpanAggregator[F, S]) getSeries() AvgOverTimeSeries[S] { // Fast path if g.lastSeries.init && g.lastBuf.fast == g.buf.fast { return g.lastSeries @@ -262,7 +430,7 @@ func (g *AvgAggregator[F, S]) getSeries() AvgSeries[S] { s, ok := g.series[g.buf.fast] if !ok { intervals := IntervalCount(g.start, g.end, g.step) - s = AvgSeries[S]{ + s = AvgOverTimeSeries[S]{ init: true, vals: g.buf.vals, count: make([]float64, intervals), @@ -280,167 +448,3 @@ func (g *AvgAggregator[F, S]) getSeries() AvgSeries[S] { g.lastSeries = s return s } - -func NewAvgAggregator(attr Attribute, by []Attribute, start, end, step uint64) SpanAggregator { - lookups := make([][]Attribute, len(by)) - for i, attr := range by { - if attr.Intrinsic == IntrinsicNone && attr.Scope == AttributeScopeNone { - // Unscoped attribute. Check span-level, then resource-level. - // TODO - Is this taken care of by span.AttributeFor now? 
- lookups[i] = []Attribute{ - NewScopedAttribute(AttributeScopeSpan, false, attr.Name), - NewScopedAttribute(AttributeScopeResource, false, attr.Name), - } - } else { - lookups[i] = []Attribute{attr} - } - } - - aggNum := len(lookups) - - switch aggNum { - case 2: - return newAvgAggregator[FastStatic2, StaticVals2](attr, by, lookups, start, end, step) - case 3: - return newAvgAggregator[FastStatic3, StaticVals3](attr, by, lookups, start, end, step) - case 4: - return newAvgAggregator[FastStatic4, StaticVals4](attr, by, lookups, start, end, step) - case 5: - return newAvgAggregator[FastStatic5, StaticVals5](attr, by, lookups, start, end, step) - default: - return newAvgAggregator[FastStatic1, StaticVals1](attr, by, lookups, start, end, step) - } -} - -func newAvgAggregator[F FastStatic, S StaticVals](attr Attribute, by []Attribute, lookups [][]Attribute, start, end, step uint64) SpanAggregator { - var fn func(s Span) float64 - - switch attr { - case IntrinsicDurationAttribute: - fn = func(s Span) float64 { - return float64(s.DurationNanos()) / float64(time.Second) - } - default: - fn = func(s Span) float64 { - f, a := FloatizeAttribute(s, attr) - if a == TypeNil { - return math.Float64frombits(normalNaN) - } - return f - } - } - - return &AvgAggregator[F, S]{ - series: map[F]AvgSeries[S]{}, - getSpanAttValue: fn, - by: by, - byLookups: lookups, - start: start, - end: end, - step: step, - } -} - -type MetricsAverageAggregate struct { - by []Attribute - attr Attribute - agg SpanAggregator - seriesAgg SeriesAggregator - exemplarFn getExemplar - mode AggregateMode -} - -var _ metricsFirstStageElement = (*MetricsAverageAggregate)(nil) - -func newMetricsAverageAggregateWithAttr(attr Attribute, by []Attribute) *MetricsAverageAggregate { - return &MetricsAverageAggregate{ - attr: attr, - by: by, - } -} - -func (a *MetricsAverageAggregate) init(q *tempopb.QueryRangeRequest, mode AggregateMode) { - exemplarFn := func(s Span) (float64, uint64) { - return math.NaN(), 
a.spanStartTimeMs(s) - } - - a.seriesAgg = &SimpleAverageSeriesAggregator{ - ss: make(SeriesSet), - countProm: make(map[string]string), - len: IntervalCount(q.Start, q.End, q.Step), - start: q.Start, - end: q.End, - step: q.Step, - } - - if mode == AggregateModeRaw { - a.agg = NewAvgAggregator(a.attr, a.by, q.Start, q.End, q.Step) - } - - a.exemplarFn = exemplarFn - a.mode = mode -} - -func (a *MetricsAverageAggregate) observe(span Span) { - a.agg.Observe(span) -} - -func (a *MetricsAverageAggregate) observeExemplar(span Span) { - v, ts := a.exemplarFn(span) - a.agg.ObserveExemplar(span, v, ts) -} - -func (a *MetricsAverageAggregate) observeSeries(ss []*tempopb.TimeSeries) { - a.seriesAgg.Combine(ss) -} - -func (a *MetricsAverageAggregate) result() SeriesSet { - if a.agg != nil { - return a.agg.Series() - } - - // In the frontend-version the results come from - // the job-level aggregator - ss := a.seriesAgg.Results() - if a.mode == AggregateModeFinal { - for i := range ss { - if strings.Contains(i, "_type") { - delete(ss, i) - } - } - } - return ss -} - -func (a *MetricsAverageAggregate) extractConditions(request *FetchSpansRequest) { - // For metrics aggregators based on a span attribute we have to include it - includeAttribute := a.attr != (Attribute{}) && !request.HasAttribute(a.attr) - if includeAttribute { - request.SecondPassConditions = append(request.SecondPassConditions, Condition{ - Attribute: a.attr, - }) - } - - for _, b := range a.by { - if !request.HasAttribute(b) { - request.SecondPassConditions = append(request.SecondPassConditions, Condition{ - Attribute: b, - }) - } - } -} - -func (a *MetricsAverageAggregate) validate() error { - if len(a.by) >= maxGroupBys { - return newUnsupportedError(fmt.Sprintf("metrics group by %v values", len(a.by))) - } - return nil -} - -func (a *MetricsAverageAggregate) spanStartTimeMs(s Span) uint64 { - return s.StartTimeUnixNanos() / uint64(time.Millisecond) -} - -func (a *MetricsAverageAggregate) String() string { - 
return "avg(" + a.attr.String() + ")" -} diff --git a/pkg/traceql/engine_metrics_test.go b/pkg/traceql/engine_metrics_test.go index eb8fae70bab..2fc0e356896 100644 --- a/pkg/traceql/engine_metrics_test.go +++ b/pkg/traceql/engine_metrics_test.go @@ -656,7 +656,7 @@ func TestAvgOverTimeForDuration(t *testing.T) { assert.True(t, math.IsNaN(fooBar.Values[2])) } -func TestAvgOverTimeForDuration2(t *testing.T) { +func TestAvgOverTimeForDurationWithoutAggregation(t *testing.T) { req := &tempopb.QueryRangeRequest{ Start: uint64(1 * time.Second), End: uint64(3 * time.Second), @@ -677,22 +677,15 @@ func TestAvgOverTimeForDuration2(t *testing.T) { newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithDuration(100), newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithDuration(200), - newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithDuration(300), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "bar").WithDuration(300), } result := runTraceQLMetric(t, req, in) + avg := result[`{__name__="avg_over_time"}`] - fooBaz := result[`{span.foo="baz"}`] - fooBar := result[`{span.foo="bar"}`] - - // We cannot compare with require.Equal because NaN != NaN - assert.True(t, math.IsNaN(fooBaz.Values[0])) - assert.True(t, math.IsNaN(fooBaz.Values[1])) - assert.Equal(t, 200., fooBaz.Values[2]*float64(time.Second)) - - assert.Equal(t, 100., fooBar.Values[0]*float64(time.Second)) - assert.Equal(t, 200., fooBar.Values[1]*float64(time.Second)) - assert.True(t, math.IsNaN(fooBar.Values[2])) + assert.Equal(t, 100., avg.Values[0]*float64(time.Second)) + assert.Equal(t, 200., avg.Values[1]*float64(time.Second)) + assert.Equal(t, 200., avg.Values[2]*float64(time.Second)) } func TestAvgOverTimeForSpanAttribute(t *testing.T) { @@ -796,6 +789,81 @@ func TestAvgOverTimeWithNoMatch(t *testing.T) { assert.True(t, len(ts) == 0) } +func 
TestObserveSeriesAverageOverTimeForSpanAttribute(t *testing.T) { + req := &tempopb.QueryRangeRequest{ + Start: uint64(1 * time.Second), + End: uint64(3 * time.Second), + Step: uint64(1 * time.Second), + Query: "{ } | avg_over_time(span.http.status_code) by (span.foo)", + } + + // A variety of spans across times, durations, and series. All durations are powers of 2 for simplicity + in := []Span{ + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 300), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 400), + + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), + + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 200), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 400), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 500), + } + + in2 := []Span{ + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), + newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 300), + + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", 
"bar").WithSpanInt("http.status_code", 400), + newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), + + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 100), + newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), + } + + e := NewEngine() + layer1A, _ := e.CompileMetricsQueryRange(req, 0, 0, false) + layer1B, _ := e.CompileMetricsQueryRange(req, 0, 0, false) + layer2A, _ := e.CompileMetricsQueryRangeNonRaw(req, AggregateModeSum) + layer2B, _ := e.CompileMetricsQueryRangeNonRaw(req, AggregateModeSum) + layer3, _ := e.CompileMetricsQueryRangeNonRaw(req, AggregateModeFinal) + + for _, s := range in { + layer1A.metricsPipeline.observe(s) + } + + layer2A.ObserveSeries(layer1A.Results().ToProto(req)) + + for _, s := range in2 { + layer1B.metricsPipeline.observe(s) + } + + layer2B.ObserveSeries(layer1B.Results().ToProto(req)) + + layer3.ObserveSeries(layer2A.Results().ToProto(req)) + layer3.ObserveSeries(layer2B.Results().ToProto(req)) + + result := layer3.Results() + + fooBaz := result[`{span.foo="baz"}`] + fooBar := result[`{span.foo="bar"}`] + + // Alas,we cannot compare with require.Equal because NaN != NaN + // foo.baz = (NaN, NaN, 300) + assert.True(t, math.IsNaN(fooBaz.Values[0])) + assert.True(t, math.IsNaN(fooBaz.Values[1])) + // 300 = (200 + 400 + 500 + 100) / 4 + assert.Equal(t, 300.0, fooBaz.Values[2]) + + // foo.bar = (260,200, 100) + assert.Equal(t, 260.0, fooBar.Values[0]) + assert.Equal(t, 200.0, fooBar.Values[1]) + assert.Equal(t, 100.0, fooBar.Values[2]) +} + func TestMaxOverTimeForDuration(t *testing.T) { req := &tempopb.QueryRangeRequest{ Start: uint64(1 * time.Second), @@ -1010,67 +1078,6 @@ func TestHistogramOverTime(t *testing.T) { require.Equal(t, out, result) } -func TestObserveSeriesAverageOverTimeForSpanAttribute(t *testing.T) { - req := 
&tempopb.QueryRangeRequest{ - Start: uint64(1 * time.Second), - End: uint64(3 * time.Second), - Step: uint64(1 * time.Second), - Query: "{ } | avg_over_time(span.http.status_code) by (span.foo)", - } - - // A variety of spans across times, durations, and series. All durations are powers of 2 for simplicity - in := []Span{ - newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), - newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 300), - newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 400), - - newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), - newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), - newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), - newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), - - newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 201), - newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 401), - newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 500), - } - - in2 := []Span{ - newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), - newMockSpan(nil).WithStartTime(uint64(1*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 300), - - newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 400), - 
newMockSpan(nil).WithStartTime(uint64(2*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 200), - - newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "baz").WithSpanInt("http.status_code", 400), - newMockSpan(nil).WithStartTime(uint64(3*time.Second)).WithSpanString("foo", "bar").WithSpanInt("http.status_code", 100), - } - - e := NewEngine() - layer1A, _ := e.CompileMetricsQueryRange(req, 0, 0, false) - layer1B, _ := e.CompileMetricsQueryRange(req, 0, 0, false) - layer2A, _ := e.CompileMetricsQueryRangeNonRaw(req, AggregateModeSum) - layer2B, _ := e.CompileMetricsQueryRangeNonRaw(req, AggregateModeSum) - layer3, _ := e.CompileMetricsQueryRangeNonRaw(req, AggregateModeFinal) - - for _, s := range in { - layer1A.metricsPipeline.observe(s) - } - - layer2A.ObserveSeries(layer1A.Results().ToProto(req)) - - for _, s := range in2 { - layer1B.metricsPipeline.observe(s) - } - - layer2B.ObserveSeries(layer1B.Results().ToProto(req)) - - layer3.ObserveSeries(layer2A.Results().ToProto(req)) - layer3.ObserveSeries(layer2B.Results().ToProto(req)) - - res := layer3.Results().ToProto(req) - assert.NotNil(t, res) -} - func runTraceQLMetric(t *testing.T, req *tempopb.QueryRangeRequest, inSpans ...[]Span) SeriesSet { e := NewEngine() diff --git a/pkg/traceql/expr.y b/pkg/traceql/expr.y index efdb4a06558..538c6e1209a 100644 --- a/pkg/traceql/expr.y +++ b/pkg/traceql/expr.y @@ -302,8 +302,8 @@ metricsAggregation: | MIN_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateMinOverTime, $3, $7) } | MAX_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateMaxOverTime, $3, nil) } | MAX_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateMaxOverTime, $3, $7) } - | AVG_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = 
newMetricsAverageAggregateWithAttr($3, nil) } - | AVG_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAverageAggregateWithAttr($3, $7) } + | AVG_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = newAverageOverTimeMetricsAggregator($3, nil) } + | AVG_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newAverageOverTimeMetricsAggregator($3, $7) } | QUANTILE_OVER_TIME OPEN_PARENS attribute COMMA numericList CLOSE_PARENS { $$ = newMetricsAggregateQuantileOverTime($3, $5, nil) } | QUANTILE_OVER_TIME OPEN_PARENS attribute COMMA numericList CLOSE_PARENS BY OPEN_PARENS attributeList CLOSE_PARENS { $$ = newMetricsAggregateQuantileOverTime($3, $5, $9) } | HISTOGRAM_OVER_TIME OPEN_PARENS attribute CLOSE_PARENS { $$ = newMetricsAggregateWithAttr(metricsAggregateHistogramOverTime, $3, nil) } diff --git a/pkg/traceql/expr.y.go b/pkg/traceql/expr.y.go index 915f9197de2..f8855be5682 100644 --- a/pkg/traceql/expr.y.go +++ b/pkg/traceql/expr.y.go @@ -1613,13 +1613,13 @@ yydefault: yyDollar = yyS[yypt-4 : yypt+1] //line pkg/traceql/expr.y:305 { - yyVAL.metricsAggregation = newMetricsAverageAggregateWithAttr(yyDollar[3].attribute, nil) + yyVAL.metricsAggregation = newAverageOverTimeMetricsAggregator(yyDollar[3].attribute, nil) } case 115: yyDollar = yyS[yypt-8 : yypt+1] //line pkg/traceql/expr.y:306 { - yyVAL.metricsAggregation = newMetricsAverageAggregateWithAttr(yyDollar[3].attribute, yyDollar[7].attributeList) + yyVAL.metricsAggregation = newAverageOverTimeMetricsAggregator(yyDollar[3].attribute, yyDollar[7].attributeList) } case 116: yyDollar = yyS[yypt-6 : yypt+1] From 378999a6d08520908aa7ee8e3f8c2213f0d3e095 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 17 Oct 2024 15:34:07 +0200 Subject: [PATCH 08/29] added exemplars --- pkg/traceql/engine_metrics_average.go | 159 +++++++++++++++++++------- 1 file changed, 115 insertions(+), 44 deletions(-) diff --git 
a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index b35a8225b00..fc62405973f 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -38,12 +38,13 @@ func (a *AverageOverTimeAggregator) init(q *tempopb.QueryRangeRequest, mode Aggr } a.seriesAgg = &SpanSetsAverageOverTimeAggregator{ - ss: make(SeriesSet), - countProm: make(map[string]string), - len: IntervalCount(q.Start, q.End, q.Step), - start: q.Start, - end: q.End, - step: q.Step, + ss: make(SeriesSet), + countProm: make(map[string]string), + len: IntervalCount(q.Start, q.End, q.Step), + start: q.Start, + end: q.End, + step: q.Step, + exemplarBuckets: newBucketSet(IntervalCount(q.Start, q.End, q.Step)), } if mode == AggregateModeRaw { @@ -123,14 +124,53 @@ type SpanSetsAverageOverTimeAggregator struct { countProm map[string]string len int start, end, step uint64 + exemplarBuckets *bucketSet } -var _ SeriesAggregator = (*SpanSetsAverageOverTimeAggregator)(nil) +var ( + _ SeriesAggregator = (*SpanSetsAverageOverTimeAggregator)(nil) + nan = math.Float64frombits(normalNaN) +) func (b *SpanSetsAverageOverTimeAggregator) Combine(in []*tempopb.TimeSeries) { newCountersTS := make(map[string][]float64) - nan := math.Float64frombits(normalNaN) + b.initSeriesAggregator(in, newCountersTS) + for _, ts := range in { + counterLabel, ok := b.countProm[ts.PromLabels] + if !ok { + // This is a counter label, we can skip it + continue + } + existing := b.ss[ts.PromLabels] + for _, sample := range ts.Samples { + pos := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) + if pos < 0 || pos > len(b.ss[ts.PromLabels].Values) { + continue + } + + currentAvg := b.ss[ts.PromLabels].Values[pos] + newAvg := sample.Value + currentCount := b.ss[counterLabel].Values[pos] + newCount := newCountersTS[ts.PromLabels][pos] + + if math.IsNaN(currentAvg) && !math.IsNaN(newAvg) { + b.ss[ts.PromLabels].Values[pos] = newAvg + b.ss[counterLabel].Values[pos] = newCount + } 
else if !math.IsNaN(newAvg) { + // Weighted mean + avg := (currentAvg*currentCount + newAvg*newCount) / (currentCount + newCount) + b.ss[ts.PromLabels].Values[pos] = avg + b.ss[counterLabel].Values[pos] = currentCount + newCount + } + } + + b.aggregateExemplars(ts, &existing) + b.ss[ts.PromLabels] = existing + } +} + +func (b *SpanSetsAverageOverTimeAggregator) initSeriesAggregator(in []*tempopb.TimeSeries, newCountersTS map[string][]float64) { for _, ts := range in { counterPromLabel := "" if strings.Contains(ts.PromLabels, "_type") { @@ -158,32 +198,33 @@ func (b *SpanSetsAverageOverTimeAggregator) Combine(in []*tempopb.TimeSeries) { b.ss[ts.PromLabels] = n } } - for _, ts := range in { - counterLabel, ok := b.countProm[ts.PromLabels] - if !ok { - // This is a counter label, we can skip it - continue - } - for _, sample := range ts.Samples { - pos := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) - if pos < 0 || pos > len(b.ss[ts.PromLabels].Values) { - continue - } - - currentAvg := b.ss[ts.PromLabels].Values[pos] - newAvg := sample.Value - currentCount := b.ss[counterLabel].Values[pos] - newCount := newCountersTS[ts.PromLabels][pos] +} - if math.IsNaN(currentAvg) && !math.IsNaN(newAvg) { - b.ss[ts.PromLabels].Values[pos] = newAvg - b.ss[counterLabel].Values[pos] = newCount - } else if !math.IsNaN(newAvg) { - avg := (currentAvg*currentCount + newAvg*newCount) / (currentCount + newCount) - b.ss[ts.PromLabels].Values[pos] = avg - b.ss[counterLabel].Values[pos] = currentCount + newCount - } +func (b *SpanSetsAverageOverTimeAggregator) aggregateExemplars(ts *tempopb.TimeSeries, existing *TimeSeries) { + for _, exemplar := range ts.Exemplars { + if b.exemplarBuckets.testTotal() { + break } + interval := IntervalOfMs(exemplar.TimestampMs, b.start, b.end, b.step) + if b.exemplarBuckets.addAndTest(interval) { + continue // Skip this exemplar and continue, next exemplar might fit in a different bucket } + } + labels := make(Labels, 0, len(exemplar.Labels)) + 
for _, l := range exemplar.Labels { + labels = append(labels, Label{ + Name: l.Key, + Value: StaticFromAnyValue(l.Value), + }) + } + value := exemplar.Value + if math.IsNaN(value) { + value = 0 // TODO: Use the value of the series at the same timestamp + } + existing.Exemplars = append(existing.Exemplars, Exemplar{ + Labels: labels, + Value: value, + TimestampMs: uint64(exemplar.TimestampMs), + }) } } @@ -207,11 +248,13 @@ func (b *SpanSetsAverageOverTimeAggregator) Results() SeriesSet { // Accumulated results of average over time type AvgOverTimeSeries[S StaticVals] struct { - avg []float64 - count []float64 - compensation []float64 - init bool - vals S + avg []float64 + count []float64 + compensation []float64 + exemplars []Exemplar + exemplarBuckets *bucketSet + init bool + vals S } // In charge of calculating the average over time for a set of spans @@ -355,7 +398,33 @@ func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { return t, c } -func (g *AvgOverTimeSpanAggregator[F, S]) ObserveExemplar(_ Span, _ float64, _ uint64) { +func (g *AvgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float64, ts uint64) { + if !g.getGroupingValues(span) { + return + } + + // Observe exemplar + all := span.AllAttributes() + lbls := make(Labels, 0, len(all)) + for k, v := range span.AllAttributes() { + lbls = append(lbls, Label{k.String(), v}) + } + s := g.getSeries() + + if s.exemplarBuckets.testTotal() { + return + } + interval := IntervalOfMs(int64(ts), g.start, g.end, g.step) + if s.exemplarBuckets.addAndTest(interval) { + return + } + + s.exemplars = append(s.exemplars, Exemplar{ + Labels: lbls, + Value: value, + TimestampMs: ts, + }) + g.series[g.buf.fast] = s } func (g *AvgOverTimeSpanAggregator[F, S]) labelsFor(vals S, t string) (Labels, string) { @@ -396,7 +465,7 @@ func (g *AvgOverTimeSpanAggregator[F, S]) Series() SeriesSet { ss[promLabelsAvg] = TimeSeries{ Labels: labels, Values: s.avg, - Exemplars: []Exemplar{}, + Exemplars: s.exemplars, } // 
Second, get the "count" series labels, promLabelsCount := g.labelsFor(s.vals, "count") @@ -431,11 +500,13 @@ func (g *AvgOverTimeSpanAggregator[F, S]) getSeries() AvgOverTimeSeries[S] { if !ok { intervals := IntervalCount(g.start, g.end, g.step) s = AvgOverTimeSeries[S]{ - init: true, - vals: g.buf.vals, - count: make([]float64, intervals), - avg: make([]float64, intervals), - compensation: make([]float64, intervals), + init: true, + vals: g.buf.vals, + count: make([]float64, intervals), + avg: make([]float64, intervals), + compensation: make([]float64, intervals), + exemplars: make([]Exemplar, 0, maxExemplars), + exemplarBuckets: newBucketSet(intervals), } for i := 0; i < intervals; i++ { s.avg[i] = math.Float64frombits(normalNaN) From afe513fffe05bf64c69a876a1374c511565c54ef Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 17 Oct 2024 15:48:02 +0200 Subject: [PATCH 09/29] fix tests --- pkg/traceql/engine_metrics_average.go | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index fc62405973f..32be470697c 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -116,7 +116,26 @@ func (a *AverageOverTimeAggregator) spanStartTimeMs(s Span) uint64 { } func (a *AverageOverTimeAggregator) String() string { - return "avg(" + a.attr.String() + ")" + s := strings.Builder{} + + s.WriteString(metricsAggregateAvgOverTime.String()) + s.WriteString("(") + if a.attr != (Attribute{}) { + s.WriteString(a.attr.String()) + } + s.WriteString(")") + + if len(a.by) > 0 { + s.WriteString("by(") + for i, b := range a.by { + s.WriteString(b.String()) + if i < len(a.by)-1 { + s.WriteString(",") + } + } + s.WriteString(")") + } + return s.String() } type SpanSetsAverageOverTimeAggregator struct { From 0691946f49c0672265ff3864de92b2b0ddfde3ec Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 17 Oct 2024 15:55:31 +0200 
Subject: [PATCH 10/29] fix another test --- pkg/traceql/parse_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/traceql/parse_test.go b/pkg/traceql/parse_test.go index fb208e588f5..fe24606e00a 100644 --- a/pkg/traceql/parse_test.go +++ b/pkg/traceql/parse_test.go @@ -1397,7 +1397,7 @@ func TestMetrics(t *testing.T) { in: `{ } | avg_over_time(duration) by(name, span.http.status_code)`, expected: newRootExprWithMetrics( newPipeline(newSpansetFilter(NewStaticBool(true))), - newMetricsAggregateWithAttr(metricsAggregateAvgOverTime, + newAverageOverTimeMetricsAggregator( NewIntrinsic(IntrinsicDuration), []Attribute{ NewIntrinsic(IntrinsicName), From f55b1db4f2c7f8acb831d62468a5aa78ab7eda12 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Mon, 21 Oct 2024 11:49:46 +0200 Subject: [PATCH 11/29] use constants --- pkg/traceql/engine_metrics.go | 2 ++ pkg/traceql/engine_metrics_average.go | 12 ++++++------ pkg/traceql/engine_metrics_compare.go | 1 - 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/pkg/traceql/engine_metrics.go b/pkg/traceql/engine_metrics.go index 7c711fe8fbf..44f4de6cffe 100644 --- a/pkg/traceql/engine_metrics.go +++ b/pkg/traceql/engine_metrics.go @@ -17,6 +17,8 @@ import ( ) const ( + internalLabelMetaType = "__meta_type" + internalMetaTypeCount = "__count" internalLabelBucket = "__bucket" maxExemplars = 100 maxExemplarsPerBucket = 2 diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index 32be470697c..dafe7ecee18 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -78,7 +78,7 @@ func (a *AverageOverTimeAggregator) result() SeriesSet { ss := a.seriesAgg.Results() if a.mode == AggregateModeFinal { for i := range ss { - if strings.Contains(i, "_type") { + if strings.Contains(i, internalLabelMetaType) { delete(ss, i) } } @@ -192,8 +192,8 @@ func (b *SpanSetsAverageOverTimeAggregator) Combine(in []*tempopb.TimeSeries) { func (b 
*SpanSetsAverageOverTimeAggregator) initSeriesAggregator(in []*tempopb.TimeSeries, newCountersTS map[string][]float64) { for _, ts := range in { counterPromLabel := "" - if strings.Contains(ts.PromLabels, "_type") { - counterPromLabel = getLabels(ts.Labels, "_type").String() + if strings.Contains(ts.PromLabels, internalLabelMetaType) { + counterPromLabel = getLabels(ts.Labels, internalLabelMetaType).String() newCountersTS[counterPromLabel] = make([]float64, b.len) for i, sample := range ts.Samples { newCountersTS[counterPromLabel][i] = sample.Value @@ -451,7 +451,7 @@ func (g *AvgOverTimeSpanAggregator[F, S]) labelsFor(vals S, t string) (Labels, s serieLabel := make(Labels, 1, 2) serieLabel[0] = Label{labels.MetricName, NewStaticString(metricsAggregateAvgOverTime.String())} if t != "" { - serieLabel = append(serieLabel, Label{"_type", NewStaticString(t)}) + serieLabel = append(serieLabel, Label{internalLabelMetaType, NewStaticString(t)}) } return serieLabel, serieLabel.String() } @@ -469,7 +469,7 @@ func (g *AvgOverTimeSpanAggregator[F, S]) labelsFor(vals S, t string) (Labels, s } if t != "" { - labels = append(labels, Label{"_type", NewStaticString(t)}) + labels = append(labels, Label{internalLabelMetaType, NewStaticString(t)}) } return labels, labels.String() @@ -487,7 +487,7 @@ func (g *AvgOverTimeSpanAggregator[F, S]) Series() SeriesSet { Exemplars: s.exemplars, } // Second, get the "count" series - labels, promLabelsCount := g.labelsFor(s.vals, "count") + labels, promLabelsCount := g.labelsFor(s.vals, internalMetaTypeCount) ss[promLabelsCount] = TimeSeries{ Labels: labels, Values: s.count, diff --git a/pkg/traceql/engine_metrics_compare.go b/pkg/traceql/engine_metrics_compare.go index 292b3e13785..37ab60de569 100644 --- a/pkg/traceql/engine_metrics_compare.go +++ b/pkg/traceql/engine_metrics_compare.go @@ -10,7 +10,6 @@ import ( ) const ( - internalLabelMetaType = "__meta_type" internalMetaTypeBaseline = "baseline" internalMetaTypeSelection = "selection" 
internalMetaTypeBaselineTotal = "baseline_total" From 3c5e2f052834269346199e861773a38d1b51d5ed Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Mon, 21 Oct 2024 15:51:19 +0200 Subject: [PATCH 12/29] unexport structs and methods --- pkg/traceql/engine_metrics_average.go | 72 +++++++++++++-------------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index dafe7ecee18..e14cbb5d7b8 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -12,7 +12,7 @@ import ( ) // Average over time aggregator -type AverageOverTimeAggregator struct { +type averageOverTimeAggregator struct { by []Attribute attr Attribute // Average over time span aggregator @@ -23,21 +23,21 @@ type AverageOverTimeAggregator struct { mode AggregateMode } -var _ metricsFirstStageElement = (*AverageOverTimeAggregator)(nil) +var _ metricsFirstStageElement = (*averageOverTimeAggregator)(nil) -func newAverageOverTimeMetricsAggregator(attr Attribute, by []Attribute) *AverageOverTimeAggregator { - return &AverageOverTimeAggregator{ +func newAverageOverTimeMetricsAggregator(attr Attribute, by []Attribute) *averageOverTimeAggregator { + return &averageOverTimeAggregator{ attr: attr, by: by, } } -func (a *AverageOverTimeAggregator) init(q *tempopb.QueryRangeRequest, mode AggregateMode) { +func (a *averageOverTimeAggregator) init(q *tempopb.QueryRangeRequest, mode AggregateMode) { exemplarFn := func(s Span) (float64, uint64) { return math.NaN(), a.spanStartTimeMs(s) } - a.seriesAgg = &SpanSetsAverageOverTimeAggregator{ + a.seriesAgg = &averageOverTimeSeriesAggregator{ ss: make(SeriesSet), countProm: make(map[string]string), len: IntervalCount(q.Start, q.End, q.Step), @@ -48,27 +48,27 @@ func (a *AverageOverTimeAggregator) init(q *tempopb.QueryRangeRequest, mode Aggr } if mode == AggregateModeRaw { - a.agg = NewAvgOverTimeSpanAggregator(a.attr, a.by, q.Start, q.End, q.Step) + 
a.agg = newAvgOverTimeSpanAggregator(a.attr, a.by, q.Start, q.End, q.Step) } a.exemplarFn = exemplarFn a.mode = mode } -func (a *AverageOverTimeAggregator) observe(span Span) { +func (a *averageOverTimeAggregator) observe(span Span) { a.agg.Observe(span) } -func (a *AverageOverTimeAggregator) observeExemplar(span Span) { +func (a *averageOverTimeAggregator) observeExemplar(span Span) { v, ts := a.exemplarFn(span) a.agg.ObserveExemplar(span, v, ts) } -func (a *AverageOverTimeAggregator) observeSeries(ss []*tempopb.TimeSeries) { +func (a *averageOverTimeAggregator) observeSeries(ss []*tempopb.TimeSeries) { a.seriesAgg.Combine(ss) } -func (a *AverageOverTimeAggregator) result() SeriesSet { +func (a *averageOverTimeAggregator) result() SeriesSet { if a.agg != nil { return a.agg.Series() } @@ -86,7 +86,7 @@ func (a *AverageOverTimeAggregator) result() SeriesSet { return ss } -func (a *AverageOverTimeAggregator) extractConditions(request *FetchSpansRequest) { +func (a *averageOverTimeAggregator) extractConditions(request *FetchSpansRequest) { // For metrics aggregators based on a span attribute we have to include it includeAttribute := a.attr != (Attribute{}) && !request.HasAttribute(a.attr) if includeAttribute { @@ -104,18 +104,18 @@ func (a *AverageOverTimeAggregator) extractConditions(request *FetchSpansRequest } } -func (a *AverageOverTimeAggregator) validate() error { +func (a *averageOverTimeAggregator) validate() error { if len(a.by) >= maxGroupBys { return newUnsupportedError(fmt.Sprintf("metrics group by %v values", len(a.by))) } return nil } -func (a *AverageOverTimeAggregator) spanStartTimeMs(s Span) uint64 { +func (a *averageOverTimeAggregator) spanStartTimeMs(s Span) uint64 { return s.StartTimeUnixNanos() / uint64(time.Millisecond) } -func (a *AverageOverTimeAggregator) String() string { +func (a *averageOverTimeAggregator) String() string { s := strings.Builder{} s.WriteString(metricsAggregateAvgOverTime.String()) @@ -138,7 +138,7 @@ func (a 
*AverageOverTimeAggregator) String() string { return s.String() } -type SpanSetsAverageOverTimeAggregator struct { +type averageOverTimeSeriesAggregator struct { ss SeriesSet countProm map[string]string len int @@ -147,11 +147,11 @@ type SpanSetsAverageOverTimeAggregator struct { } var ( - _ SeriesAggregator = (*SpanSetsAverageOverTimeAggregator)(nil) + _ SeriesAggregator = (*averageOverTimeSeriesAggregator)(nil) nan = math.Float64frombits(normalNaN) ) -func (b *SpanSetsAverageOverTimeAggregator) Combine(in []*tempopb.TimeSeries) { +func (b *averageOverTimeSeriesAggregator) Combine(in []*tempopb.TimeSeries) { newCountersTS := make(map[string][]float64) b.initSeriesAggregator(in, newCountersTS) @@ -189,7 +189,7 @@ func (b *SpanSetsAverageOverTimeAggregator) Combine(in []*tempopb.TimeSeries) { } } -func (b *SpanSetsAverageOverTimeAggregator) initSeriesAggregator(in []*tempopb.TimeSeries, newCountersTS map[string][]float64) { +func (b *averageOverTimeSeriesAggregator) initSeriesAggregator(in []*tempopb.TimeSeries, newCountersTS map[string][]float64) { for _, ts := range in { counterPromLabel := "" if strings.Contains(ts.PromLabels, internalLabelMetaType) { @@ -219,7 +219,7 @@ func (b *SpanSetsAverageOverTimeAggregator) initSeriesAggregator(in []*tempopb.T } } -func (b *SpanSetsAverageOverTimeAggregator) aggregateExemplars(ts *tempopb.TimeSeries, existing *TimeSeries) { +func (b *averageOverTimeSeriesAggregator) aggregateExemplars(ts *tempopb.TimeSeries, existing *TimeSeries) { for _, exemplar := range ts.Exemplars { if b.exemplarBuckets.testTotal() { break @@ -261,12 +261,12 @@ func getLabels(vals []v1.KeyValue, skipKey string) Labels { return labels } -func (b *SpanSetsAverageOverTimeAggregator) Results() SeriesSet { +func (b *averageOverTimeSeriesAggregator) Results() SeriesSet { return b.ss } // Accumulated results of average over time -type AvgOverTimeSeries[S StaticVals] struct { +type avgOverTimeSeries[S StaticVals] struct { avg []float64 count []float64 
compensation []float64 @@ -278,7 +278,7 @@ type AvgOverTimeSeries[S StaticVals] struct { // In charge of calculating the average over time for a set of spans // First aggregation layer -type AvgOverTimeSpanAggregator[F FastStatic, S StaticVals] struct { +type avgOverTimeSpanAggregator[F FastStatic, S StaticVals] struct { // Config by []Attribute // Original attributes: .foo byLookups [][]Attribute // Lookups: span.foo resource.foo @@ -288,15 +288,15 @@ type AvgOverTimeSpanAggregator[F FastStatic, S StaticVals] struct { step uint64 // Data - series map[F]AvgOverTimeSeries[S] - lastSeries AvgOverTimeSeries[S] + series map[F]avgOverTimeSeries[S] + lastSeries avgOverTimeSeries[S] buf fastStaticWithValues[F, S] lastBuf fastStaticWithValues[F, S] } -var _ SpanAggregator = (*AvgOverTimeSpanAggregator[FastStatic1, StaticVals1])(nil) +var _ SpanAggregator = (*avgOverTimeSpanAggregator[FastStatic1, StaticVals1])(nil) -func NewAvgOverTimeSpanAggregator(attr Attribute, by []Attribute, start, end, step uint64) SpanAggregator { +func newAvgOverTimeSpanAggregator(attr Attribute, by []Attribute, start, end, step uint64) SpanAggregator { lookups := make([][]Attribute, len(by)) for i, attr := range by { if attr.Intrinsic == IntrinsicNone && attr.Scope == AttributeScopeNone { @@ -345,8 +345,8 @@ func newAvgAggregator[F FastStatic, S StaticVals](attr Attribute, by []Attribute } } - return &AvgOverTimeSpanAggregator[F, S]{ - series: map[F]AvgOverTimeSeries[S]{}, + return &avgOverTimeSpanAggregator[F, S]{ + series: map[F]avgOverTimeSeries[S]{}, getSpanAttValue: fn, by: by, byLookups: lookups, @@ -356,7 +356,7 @@ func newAvgAggregator[F FastStatic, S StaticVals](attr Attribute, by []Attribute } } -func (g *AvgOverTimeSpanAggregator[F, S]) Observe(span Span) { +func (g *avgOverTimeSpanAggregator[F, S]) Observe(span Span) { if !g.getGroupingValues(span) { return } @@ -417,7 +417,7 @@ func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { return t, c } -func (g 
*AvgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float64, ts uint64) { +func (g *avgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float64, ts uint64) { if !g.getGroupingValues(span) { return } @@ -446,7 +446,7 @@ func (g *AvgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float g.series[g.buf.fast] = s } -func (g *AvgOverTimeSpanAggregator[F, S]) labelsFor(vals S, t string) (Labels, string) { +func (g *avgOverTimeSpanAggregator[F, S]) labelsFor(vals S, t string) (Labels, string) { if g.by == nil { serieLabel := make(Labels, 1, 2) serieLabel[0] = Label{labels.MetricName, NewStaticString(metricsAggregateAvgOverTime.String())} @@ -475,7 +475,7 @@ func (g *AvgOverTimeSpanAggregator[F, S]) labelsFor(vals S, t string) (Labels, s return labels, labels.String() } -func (g *AvgOverTimeSpanAggregator[F, S]) Series() SeriesSet { +func (g *avgOverTimeSpanAggregator[F, S]) Series() SeriesSet { ss := SeriesSet{} for _, s := range g.series { @@ -498,7 +498,7 @@ func (g *AvgOverTimeSpanAggregator[F, S]) Series() SeriesSet { return ss } -func (g *AvgOverTimeSpanAggregator[F, S]) getGroupingValues(span Span) bool { +func (g *avgOverTimeSpanAggregator[F, S]) getGroupingValues(span Span) bool { for i, lookups := range g.byLookups { val := lookup(lookups, span) g.buf.vals[i] = val @@ -509,7 +509,7 @@ func (g *AvgOverTimeSpanAggregator[F, S]) getGroupingValues(span Span) bool { // getSeries gets the series for the current span. // It will reuse the last series if possible. 
-func (g *AvgOverTimeSpanAggregator[F, S]) getSeries() AvgOverTimeSeries[S] { +func (g *avgOverTimeSpanAggregator[F, S]) getSeries() avgOverTimeSeries[S] { // Fast path if g.lastSeries.init && g.lastBuf.fast == g.buf.fast { return g.lastSeries @@ -518,7 +518,7 @@ func (g *AvgOverTimeSpanAggregator[F, S]) getSeries() AvgOverTimeSeries[S] { s, ok := g.series[g.buf.fast] if !ok { intervals := IntervalCount(g.start, g.end, g.step) - s = AvgOverTimeSeries[S]{ + s = avgOverTimeSeries[S]{ init: true, vals: g.buf.vals, count: make([]float64, intervals), From 0cf9dfb5ae1250f9684fb75f8d6ab1d2a04bea97 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Mon, 21 Oct 2024 15:55:17 +0200 Subject: [PATCH 13/29] refactor getSeries --- pkg/traceql/engine_metrics_average.go | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index e14cbb5d7b8..614be8f289b 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -357,19 +357,18 @@ func newAvgAggregator[F FastStatic, S StaticVals](attr Attribute, by []Attribute } func (g *avgOverTimeSpanAggregator[F, S]) Observe(span Span) { - if !g.getGroupingValues(span) { - return - } - - s := g.getSeries() interval := IntervalOf(span.StartTimeUnixNanos(), g.start, g.end, g.step) if interval == -1 { return } + inc := g.getSpanAttValue(span) if math.IsNaN(inc) { return } + + s := g.getSeries(span) + s.count[interval]++ mean, c := averageInc(s.avg[interval], inc, s.count[interval], s.compensation[interval]) s.avg[interval] = mean @@ -418,17 +417,14 @@ func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { } func (g *avgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float64, ts uint64) { - if !g.getGroupingValues(span) { - return - } - // Observe exemplar all := span.AllAttributes() lbls := make(Labels, 0, len(all)) for k, v := range span.AllAttributes() { lbls = 
append(lbls, Label{k.String(), v}) } - s := g.getSeries() + + s := g.getSeries(span) if s.exemplarBuckets.testTotal() { return @@ -498,18 +494,16 @@ func (g *avgOverTimeSpanAggregator[F, S]) Series() SeriesSet { return ss } -func (g *avgOverTimeSpanAggregator[F, S]) getGroupingValues(span Span) bool { +// getSeries gets the series for the current span. +// It will reuse the last series if possible. +func (g *avgOverTimeSpanAggregator[F, S]) getSeries(span Span) avgOverTimeSeries[S] { + // Get Grouping values for i, lookups := range g.byLookups { val := lookup(lookups, span) g.buf.vals[i] = val g.buf.fast[i] = val.MapKey() } - return true -} -// getSeries gets the series for the current span. -// It will reuse the last series if possible. -func (g *avgOverTimeSpanAggregator[F, S]) getSeries() avgOverTimeSeries[S] { // Fast path if g.lastSeries.init && g.lastBuf.fast == g.buf.fast { return g.lastSeries From 86476fbee09f55c9defd424856c28a59e275a3c0 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Mon, 21 Oct 2024 16:29:22 +0200 Subject: [PATCH 14/29] avoid out of index errors --- pkg/traceql/engine_metrics_average.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index 614be8f289b..2b03a5a7017 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -196,6 +196,10 @@ func (b *averageOverTimeSeriesAggregator) initSeriesAggregator(in []*tempopb.Tim counterPromLabel = getLabels(ts.Labels, internalLabelMetaType).String() newCountersTS[counterPromLabel] = make([]float64, b.len) for i, sample := range ts.Samples { + pos := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) + if pos < 0 || pos > len(b.ss[ts.PromLabels].Values) { + continue + } newCountersTS[counterPromLabel][i] = sample.Value } } From 892294009f85f3192e944d6a2c693f9c3f0ec9be Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Tue, 22 Oct 2024 09:19:28 +0200 Subject: [PATCH 
15/29] fix combine --- pkg/traceql/engine_metrics_average.go | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index 2b03a5a7017..8736ce1a384 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -195,12 +195,12 @@ func (b *averageOverTimeSeriesAggregator) initSeriesAggregator(in []*tempopb.Tim if strings.Contains(ts.PromLabels, internalLabelMetaType) { counterPromLabel = getLabels(ts.Labels, internalLabelMetaType).String() newCountersTS[counterPromLabel] = make([]float64, b.len) - for i, sample := range ts.Samples { + for _, sample := range ts.Samples { pos := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) - if pos < 0 || pos > len(b.ss[ts.PromLabels].Values) { + if pos < 0 || pos > b.len { continue } - newCountersTS[counterPromLabel][i] = sample.Value + newCountersTS[counterPromLabel][pos] = sample.Value } } _, ok := b.ss[ts.PromLabels] @@ -421,15 +421,7 @@ func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { } func (g *avgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float64, ts uint64) { - // Observe exemplar - all := span.AllAttributes() - lbls := make(Labels, 0, len(all)) - for k, v := range span.AllAttributes() { - lbls = append(lbls, Label{k.String(), v}) - } - s := g.getSeries(span) - if s.exemplarBuckets.testTotal() { return } @@ -438,6 +430,12 @@ func (g *avgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float return } + all := span.AllAttributes() + lbls := make(Labels, 0, len(all)) + for k, v := range span.AllAttributes() { + lbls = append(lbls, Label{k.String(), v}) + } + s.exemplars = append(s.exemplars, Exemplar{ Labels: lbls, Value: value, From 8cbd7220f12dc112fa2872a6485b75d1e333785f Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 24 Oct 2024 16:15:48 +0200 Subject: [PATCH 16/29] include compensation --- 
pkg/traceql/engine_metrics_average.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index 8736ce1a384..eacc56a6bac 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -479,6 +479,10 @@ func (g *avgOverTimeSpanAggregator[F, S]) Series() SeriesSet { for _, s := range g.series { // First, get the regular series labels, promLabelsAvg := g.labelsFor(s.vals, "") + // Include the compensation at the end + for i := range s.avg { + s.avg[i] = s.avg[i] + s.compensation[i] + } ss[promLabelsAvg] = TimeSeries{ Labels: labels, Values: s.avg, From 26d39086b7ad6ad4eeb5aca2cc75fc6144f0bbb7 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Tue, 29 Oct 2024 13:00:45 +0100 Subject: [PATCH 17/29] use incremental weighted mean --- pkg/traceql/engine_metrics_average.go | 174 ++++++++++++++++---------- 1 file changed, 110 insertions(+), 64 deletions(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index eacc56a6bac..5c014d4845f 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -38,13 +38,13 @@ func (a *averageOverTimeAggregator) init(q *tempopb.QueryRangeRequest, mode Aggr } a.seriesAgg = &averageOverTimeSeriesAggregator{ - ss: make(SeriesSet), - countProm: make(map[string]string), - len: IntervalCount(q.Start, q.End, q.Step), - start: q.Start, - end: q.End, - step: q.Step, - exemplarBuckets: newBucketSet(IntervalCount(q.Start, q.End, q.Step)), + weightedAverageSeries: make(map[string]*kahanSeries), + averageToCountMapper: make(map[string]string), + len: IntervalCount(q.Start, q.End, q.Step), + start: q.Start, + end: q.End, + step: q.Step, + exemplarBuckets: newBucketSet(IntervalCount(q.Start, q.End, q.Step)), } if mode == AggregateModeRaw { @@ -139,11 +139,47 @@ func (a *averageOverTimeAggregator) String() string { } type averageOverTimeSeriesAggregator struct { - 
ss SeriesSet - countProm map[string]string - len int - start, end, step uint64 - exemplarBuckets *bucketSet + weightedAverageSeries map[string]*kahanSeries + // mapping between average series with their weights + averageToCountMapper map[string]string + len int + start, end, step uint64 + exemplarBuckets *bucketSet +} + +type kahanValue struct { + sum float64 + compensation float64 +} +type kahanSeries struct { + values []kahanValue + labels Labels + Exemplars []Exemplar +} + +// it adds the compensation to the final value to retain precission +func (k *kahanSeries) getFinal() []float64 { + final := make([]float64, len(k.values)) + for i, v := range k.values { + final[i] = v.sum + v.compensation + } + return final +} + +func newKahanSeries(len int, lenExemplars int, labels Labels, initToNan bool) *kahanSeries { + s := &kahanSeries{ + values: make([]kahanValue, len), + labels: labels, + } + // Init the sum to discriminate between uninitialized values and 0 + if initToNan { + s.Exemplars = make([]Exemplar, 0, lenExemplars) + for i := range s.values { + s.values[i].sum = nan + } + } + + return s } var ( @@ -152,78 +188,80 @@ var ( ) func (b *averageOverTimeSeriesAggregator) Combine(in []*tempopb.TimeSeries) { - newCountersTS := make(map[string][]float64) + // We traverse the TimeSeries first to initialize and map the count series + countPosMapper := make(map[string]int, len(in)/2) + for i, ts := range in { + _, ok := b.weightedAverageSeries[ts.PromLabels] + isCountSeries := strings.Contains(ts.PromLabels, internalLabelMetaType) - b.initSeriesAggregator(in, newCountersTS) + if !ok { + promLabels := getLabels(ts.Labels, "") + + if isCountSeries { + avgSeriesPromLabel := getLabels(ts.Labels, internalLabelMetaType).String() + // we need to map the average series with the count one for fast-seeking + b.averageToCountMapper[avgSeriesPromLabel] = ts.PromLabels + } + b.weightedAverageSeries[ts.PromLabels] = newKahanSeries(b.len, len(ts.Exemplars), promLabels, !isCountSeries) 
+ } + if isCountSeries { + // mapping of the position of the count series in the time series array + countPosMapper[ts.PromLabels] = i + } + } for _, ts := range in { - counterLabel, ok := b.countProm[ts.PromLabels] + counterLabel, ok := b.averageToCountMapper[ts.PromLabels] if !ok { // This is a counter label, we can skip it continue } - existing := b.ss[ts.PromLabels] for _, sample := range ts.Samples { pos := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) - if pos < 0 || pos > len(b.ss[ts.PromLabels].Values) { + if pos < 0 || pos > len(b.weightedAverageSeries[ts.PromLabels].values) { continue } - currentAvg := b.ss[ts.PromLabels].Values[pos] + currentMean := b.weightedAverageSeries[ts.PromLabels].values[pos] + currentWeight := b.weightedAverageSeries[counterLabel].values[pos] newAvg := sample.Value - currentCount := b.ss[counterLabel].Values[pos] - newCount := newCountersTS[ts.PromLabels][pos] - - if math.IsNaN(currentAvg) && !math.IsNaN(newAvg) { - b.ss[ts.PromLabels].Values[pos] = newAvg - b.ss[counterLabel].Values[pos] = newCount - } else if !math.IsNaN(newAvg) { - // Weighted mean - avg := (currentAvg*currentCount + newAvg*newCount) / (currentCount + newCount) - b.ss[ts.PromLabels].Values[pos] = avg - b.ss[counterLabel].Values[pos] = currentCount + newCount - } - } + newWeight := in[countPosMapper[counterLabel]].Samples[pos].Value - b.aggregateExemplars(ts, &existing) - b.ss[ts.PromLabels] = existing + mean, weight := b.addWeigthedMean(currentMean, currentWeight, newAvg, newWeight) + + b.weightedAverageSeries[ts.PromLabels].values[pos] = mean + b.weightedAverageSeries[counterLabel].values[pos] = weight + b.aggregateExemplars(ts, b.weightedAverageSeries[ts.PromLabels]) + } } } -func (b *averageOverTimeSeriesAggregator) initSeriesAggregator(in []*tempopb.TimeSeries, newCountersTS map[string][]float64) { - for _, ts := range in { - counterPromLabel := "" - if strings.Contains(ts.PromLabels, internalLabelMetaType) { - counterPromLabel = 
getLabels(ts.Labels, internalLabelMetaType).String() - newCountersTS[counterPromLabel] = make([]float64, b.len) - for _, sample := range ts.Samples { - pos := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) - if pos < 0 || pos > b.len { - continue - } - newCountersTS[counterPromLabel][pos] = sample.Value - } +// It calculates the weighted mean using kahan-neumaier summation and a delta approach. +// By adding incremental values we prevent overflow +func (b averageOverTimeSeriesAggregator) addWeigthedMean(currentMean kahanValue, currentWeight kahanValue, newMean float64, newWeight float64) (kahanValue, kahanValue) { + if math.IsNaN(currentMean.sum) && !math.IsNaN(newMean) { + return kahanValue{sum: newMean, compensation: 0}, kahanValue{sum: newWeight, compensation: 0} + } + if math.IsInf(currentMean.sum, 0) { + if math.IsInf(newMean, 0) && (currentMean.sum > 0) == (newMean > 0) { + return currentMean, currentWeight } - _, ok := b.ss[ts.PromLabels] - if !ok { - labels := getLabels(ts.Labels, "") - n := TimeSeries{ - Labels: labels, - Values: make([]float64, b.len), - Exemplars: make([]Exemplar, 0, len(ts.Exemplars)), - } - if counterPromLabel != "" { - b.countProm[counterPromLabel] = ts.PromLabels - } else { - for i := range n.Values { - n.Values[i] = nan - } - } - b.ss[ts.PromLabels] = n + if !math.IsInf(newMean, 0) && !math.IsNaN(newMean) { + return currentMean, currentWeight } } + weightDelta := newWeight - currentWeight.compensation + weight, weightCompensation := kahanSumInc(currentWeight.sum, weightDelta, currentWeight.compensation) + + // Using a delta increment to avoid overflow + meanDelta := ((newMean - currentMean.sum) * newWeight) / weight + meanDelta -= currentMean.compensation + + mean, meanCompensation := kahanSumInc(currentMean.sum, meanDelta, currentMean.compensation) + + return kahanValue{sum: mean, compensation: meanCompensation}, kahanValue{sum: weight, compensation: weightCompensation} } -func (b *averageOverTimeSeriesAggregator) 
aggregateExemplars(ts *tempopb.TimeSeries, existing *TimeSeries) { +func (b *averageOverTimeSeriesAggregator) aggregateExemplars(ts *tempopb.TimeSeries, existing *kahanSeries) { for _, exemplar := range ts.Exemplars { if b.exemplarBuckets.testTotal() { break @@ -266,7 +304,15 @@ func getLabels(vals []v1.KeyValue, skipKey string) Labels { } func (b *averageOverTimeSeriesAggregator) Results() SeriesSet { - return b.ss + ss := SeriesSet{} + for k, v := range b.weightedAverageSeries { + ss[k] = TimeSeries{ + Labels: v.labels, + Values: v.getFinal(), + Exemplars: v.Exemplars, + } + } + return ss } // Accumulated results of average over time From 85dd9c17fbb3e210d6ece2150bbb6ae53b2d66d7 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Tue, 29 Oct 2024 16:16:26 +0100 Subject: [PATCH 18/29] fix panic --- pkg/traceql/engine_metrics_average.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index 5c014d4845f..0cbb812a622 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -217,12 +217,13 @@ func (b *averageOverTimeSeriesAggregator) Combine(in []*tempopb.TimeSeries) { } for _, sample := range ts.Samples { pos := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) - if pos < 0 || pos > len(b.weightedAverageSeries[ts.PromLabels].values) { + if pos < 0 || pos >= len(b.weightedAverageSeries[ts.PromLabels].values) { continue } currentMean := b.weightedAverageSeries[ts.PromLabels].values[pos] currentWeight := b.weightedAverageSeries[counterLabel].values[pos] + newAvg := sample.Value newWeight := in[countPosMapper[counterLabel]].Samples[pos].Value From a859d3f25b87f00fc8b742bb6b012f484b9bf18c Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Tue, 29 Oct 2024 17:51:25 +0100 Subject: [PATCH 19/29] drop NaN count series to match average ones --- pkg/traceql/engine_metrics_average.go | 17 +++++++++-------- 1 file changed, 9 
insertions(+), 8 deletions(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index 0cbb812a622..0f30b7efc7d 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -215,7 +215,7 @@ func (b *averageOverTimeSeriesAggregator) Combine(in []*tempopb.TimeSeries) { // This is a counter label, we can skip it continue } - for _, sample := range ts.Samples { + for i, sample := range ts.Samples { pos := IntervalOfMs(sample.TimestampMs, b.start, b.end, b.step) if pos < 0 || pos >= len(b.weightedAverageSeries[ts.PromLabels].values) { continue @@ -225,7 +225,7 @@ func (b *averageOverTimeSeriesAggregator) Combine(in []*tempopb.TimeSeries) { currentWeight := b.weightedAverageSeries[counterLabel].values[pos] newAvg := sample.Value - newWeight := in[countPosMapper[counterLabel]].Samples[pos].Value + newWeight := in[countPosMapper[counterLabel]].Samples[i].Value mean, weight := b.addWeigthedMean(currentMean, currentWeight, newAvg, newWeight) @@ -419,18 +419,18 @@ func (g *avgOverTimeSpanAggregator[F, S]) Observe(span Span) { } s := g.getSeries(span) - - s.count[interval]++ + if math.IsNaN(s.avg[interval]) && !math.IsNaN(inc) { + // When we have a proper value in the span we need to initialize to 0 + s.avg[interval] = 0 + s.count[interval] = 1 + } mean, c := averageInc(s.avg[interval], inc, s.count[interval], s.compensation[interval]) + s.count[interval]++ s.avg[interval] = mean s.compensation[interval] = c } func averageInc(mean, inc, count, compensation float64) (float64, float64) { - if math.IsNaN(mean) && !math.IsNaN(inc) { - // When we have a proper value in the span we need to initialize to 0 - mean = 0 - } if math.IsInf(mean, 0) { if math.IsInf(inc, 0) && (mean > 0) == (inc > 0) { // The `current.val` and `new` values are `Inf` of the same sign. 
They @@ -576,6 +576,7 @@ func (g *avgOverTimeSpanAggregator[F, S]) getSeries(span Span) avgOverTimeSeries } for i := 0; i < intervals; i++ { s.avg[i] = math.Float64frombits(normalNaN) + s.count[i] = math.Float64frombits(normalNaN) } g.series[g.buf.fast] = s From e82b56d2cc7774888a82d4c2852c6e5e0325b0df Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Wed, 30 Oct 2024 13:15:36 +0100 Subject: [PATCH 20/29] refactor code --- pkg/traceql/engine_metrics_average.go | 166 ++++++++++++++------------ 1 file changed, 87 insertions(+), 79 deletions(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index 0f30b7efc7d..63f95a6926e 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -38,7 +38,7 @@ func (a *averageOverTimeAggregator) init(q *tempopb.QueryRangeRequest, mode Aggr } a.seriesAgg = &averageOverTimeSeriesAggregator{ - weightedAverageSeries: make(map[string]*kahanSeries), + weightedAverageSeries: make(map[string]averageSeries), averageToCountMapper: make(map[string]string), len: IntervalCount(q.Start, q.End, q.Step), start: q.Start, @@ -139,7 +139,7 @@ func (a *averageOverTimeAggregator) String() string { } type averageOverTimeSeriesAggregator struct { - weightedAverageSeries map[string]*kahanSeries + weightedAverageSeries map[string]averageSeries // mapping between average series with their weights averageToCountMapper map[string]string len int @@ -147,39 +147,89 @@ type averageOverTimeSeriesAggregator struct { exemplarBuckets *bucketSet } -type kahanValue struct { +type averageValue struct { sum float64 compensation float64 + weight float64 } -type kahanSeries struct { - values []kahanValue + +func (a *averageValue) add(inc float64) { + if math.IsInf(a.sum, 0) { + if math.IsInf(inc, 0) && (a.sum > 0) == (inc > 0) { + return + } + if !math.IsInf(inc, 0) && !math.IsNaN(inc) { + return + } + } + val, c := kahanSumInc(inc, a.sum, a.compensation) + a.sum = val + a.compensation = c +} 
+ +type averageSeries struct { + values []averageValue labels Labels Exemplars []Exemplar } +func newAverageSeries(len int, lenExemplars int, labels Labels) averageSeries { + s := averageSeries{ + values: make([]averageValue, len), + labels: labels, + Exemplars: make([]Exemplar, 0, lenExemplars), + } + + for i := range s.values { + s.values[i].sum = nan + s.values[i].weight = nan + } + return s +} + // it adds the compensation to the final value to retain precission -func (k *kahanSeries) getFinal() []float64 { - final := make([]float64, len(k.values)) +func (k *averageSeries) getAvgSeries() TimeSeries { + ts := TimeSeries{ + Labels: k.labels, + Values: make([]float64, len(k.values)), + Exemplars: k.Exemplars, + } + for i, v := range k.values { - final[i] = v.sum + v.compensation + ts.Values[i] = v.sum + v.compensation } - return final + return ts } -func newKahanSeries(len int, lenExemplars int, labels Labels, initToNan bool) *kahanSeries { - s := &kahanSeries{ - values: make([]kahanValue, len), - labels: labels, +func (k *averageSeries) getCountSeries() TimeSeries { + countLabels := append(k.labels, Label{internalLabelMetaType, NewStaticString(internalMetaTypeCount)}) + ts := TimeSeries{ + Labels: countLabels, + Values: make([]float64, len(k.values)), } - // Init the sum to discriminate between uninitialized values and 0 - if initToNan { - s.Exemplars = make([]Exemplar, 0, lenExemplars) - for i := range s.values { - s.values[i].sum = nan - } + for i, v := range k.values { + ts.Values[i] = v.weight } + return ts +} - return s +// It calculates the incremental weighted mean using kahan-neumaier summation and a delta approach. 
+// By adding incremental values we prevent overflow +func (k *averageSeries) addWeigthedMean(interval int, mean float64, weight float64) { + currentMean := k.values[interval] + if math.IsNaN(currentMean.sum) && !math.IsNaN(mean) { + k.values[interval] = averageValue{sum: mean, weight: weight} + return + } + + sumWeights := currentMean.weight + weight + // Using a delta increment to avoid overflow + meanDelta := ((mean - currentMean.sum) * weight) / sumWeights + meanDelta -= currentMean.compensation + + currentMean.add(meanDelta) + + k.values[interval] = currentMean } var ( @@ -192,25 +242,18 @@ func (b *averageOverTimeSeriesAggregator) Combine(in []*tempopb.TimeSeries) { countPosMapper := make(map[string]int, len(in)/2) for i, ts := range in { _, ok := b.weightedAverageSeries[ts.PromLabels] - isCountSeries := strings.Contains(ts.PromLabels, internalLabelMetaType) - - if !ok { - promLabels := getLabels(ts.Labels, "") - - if isCountSeries { - avgSeriesPromLabel := getLabels(ts.Labels, internalLabelMetaType).String() - // we need to map the average series with the count one for fast-seeking - b.averageToCountMapper[avgSeriesPromLabel] = ts.PromLabels - } - b.weightedAverageSeries[ts.PromLabels] = newKahanSeries(b.len, len(ts.Exemplars), promLabels, !isCountSeries) - } - if isCountSeries { + if strings.Contains(ts.PromLabels, internalLabelMetaType) { + // Label series without the count metatype, this will match with its average series + avgSeriesPromLabel := getLabels(ts.Labels, internalLabelMetaType).String() // mapping of the position of the count series in the time series array - countPosMapper[ts.PromLabels] = i + countPosMapper[avgSeriesPromLabel] = i + } else if !ok { + promLabels := getLabels(ts.Labels, "") + b.weightedAverageSeries[ts.PromLabels] = newAverageSeries(b.len, len(ts.Exemplars), promLabels) } } for _, ts := range in { - counterLabel, ok := b.averageToCountMapper[ts.PromLabels] + existing, ok := b.weightedAverageSeries[ts.PromLabels] if !ok { // 
This is a counter label, we can skip it continue @@ -221,48 +264,15 @@ func (b *averageOverTimeSeriesAggregator) Combine(in []*tempopb.TimeSeries) { continue } - currentMean := b.weightedAverageSeries[ts.PromLabels].values[pos] - currentWeight := b.weightedAverageSeries[counterLabel].values[pos] - - newAvg := sample.Value - newWeight := in[countPosMapper[counterLabel]].Samples[i].Value - - mean, weight := b.addWeigthedMean(currentMean, currentWeight, newAvg, newWeight) - - b.weightedAverageSeries[ts.PromLabels].values[pos] = mean - b.weightedAverageSeries[counterLabel].values[pos] = weight + incomingMean := sample.Value + incomingWeight := in[countPosMapper[ts.PromLabels]].Samples[i].Value + existing.addWeigthedMean(pos, incomingMean, incomingWeight) b.aggregateExemplars(ts, b.weightedAverageSeries[ts.PromLabels]) } } } -// It calculates the weighted mean using kahan-neumaier summation and a delta approach. -// By adding incremental values we prevent overflow -func (b averageOverTimeSeriesAggregator) addWeigthedMean(currentMean kahanValue, currentWeight kahanValue, newMean float64, newWeight float64) (kahanValue, kahanValue) { - if math.IsNaN(currentMean.sum) && !math.IsNaN(newMean) { - return kahanValue{sum: newMean, compensation: 0}, kahanValue{sum: newWeight, compensation: 0} - } - if math.IsInf(currentMean.sum, 0) { - if math.IsInf(newMean, 0) && (currentMean.sum > 0) == (newMean > 0) { - return currentMean, currentWeight - } - if !math.IsInf(newMean, 0) && !math.IsNaN(newMean) { - return currentMean, currentWeight - } - } - weightDelta := newWeight - currentWeight.compensation - weight, weightCompensation := kahanSumInc(currentWeight.sum, weightDelta, currentWeight.compensation) - - // Using a delta increment to avoid overflow - meanDelta := ((newMean - currentMean.sum) * newWeight) / weight - meanDelta -= currentMean.compensation - - mean, meanCompensation := kahanSumInc(currentMean.sum, meanDelta, currentMean.compensation) - - return kahanValue{sum: mean, 
compensation: meanCompensation}, kahanValue{sum: weight, compensation: weightCompensation} -} - -func (b *averageOverTimeSeriesAggregator) aggregateExemplars(ts *tempopb.TimeSeries, existing *kahanSeries) { +func (b *averageOverTimeSeriesAggregator) aggregateExemplars(ts *tempopb.TimeSeries, existing averageSeries) { for _, exemplar := range ts.Exemplars { if b.exemplarBuckets.testTotal() { break @@ -307,11 +317,9 @@ func getLabels(vals []v1.KeyValue, skipKey string) Labels { func (b *averageOverTimeSeriesAggregator) Results() SeriesSet { ss := SeriesSet{} for k, v := range b.weightedAverageSeries { - ss[k] = TimeSeries{ - Labels: v.labels, - Values: v.getFinal(), - Exemplars: v.Exemplars, - } + ss[k] = v.getAvgSeries() + countSeries := v.getCountSeries() + ss[countSeries.Labels.String()] = countSeries } return ss } @@ -422,10 +430,10 @@ func (g *avgOverTimeSpanAggregator[F, S]) Observe(span Span) { if math.IsNaN(s.avg[interval]) && !math.IsNaN(inc) { // When we have a proper value in the span we need to initialize to 0 s.avg[interval] = 0 - s.count[interval] = 1 + s.count[interval] = 0 } - mean, c := averageInc(s.avg[interval], inc, s.count[interval], s.compensation[interval]) s.count[interval]++ + mean, c := averageInc(s.avg[interval], inc, s.count[interval], s.compensation[interval]) s.avg[interval] = mean s.compensation[interval] = c } From cbf4042cfc34d5c0e7aceace59714c09726c15f7 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Wed, 30 Oct 2024 14:46:01 +0100 Subject: [PATCH 21/29] refactor code --- pkg/traceql/engine_metrics_average.go | 133 +++++++++----------------- 1 file changed, 44 insertions(+), 89 deletions(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index 63f95a6926e..7afd69259d8 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -167,6 +167,21 @@ func (a *averageValue) add(inc float64) { a.compensation = c } +func kahanSumInc(inc, sum, c float64) 
(newSum, newC float64) { + t := sum + inc + switch { + case math.IsInf(t, 0): + c = 0 + + // Using Neumaier improvement, swap if next term larger than sum. + case math.Abs(sum) >= math.Abs(inc): + c += (sum - t) + inc + default: + c += (inc - t) + sum + } + return t, c +} + type averageSeries struct { values []averageValue labels Labels @@ -213,6 +228,18 @@ func (k *averageSeries) getCountSeries() TimeSeries { return ts } +// It increments the average +func (k *averageSeries) addMeanIncrement(interval int, inc float64) { + currentMean := k.values[interval] + if math.IsNaN(currentMean.sum) && !math.IsNaN(inc) { + k.values[interval] = averageValue{sum: inc, weight: 1} + return + } + currentMean.weight++ + currentMean.add(inc/currentMean.weight - currentMean.sum/currentMean.weight) + k.values[interval] = currentMean +} + // It calculates the incremental weighted mean using kahan-neumaier summation and a delta approach. // By adding incremental values we prevent overflow func (k *averageSeries) addWeigthedMean(interval int, mean float64, weight float64) { @@ -326,13 +353,10 @@ func (b *averageOverTimeSeriesAggregator) Results() SeriesSet { // Accumulated results of average over time type avgOverTimeSeries[S StaticVals] struct { - avg []float64 - count []float64 - compensation []float64 - exemplars []Exemplar + average averageSeries exemplarBuckets *bucketSet - init bool vals S + initialized bool } // In charge of calculating the average over time for a set of spans @@ -427,52 +451,7 @@ func (g *avgOverTimeSpanAggregator[F, S]) Observe(span Span) { } s := g.getSeries(span) - if math.IsNaN(s.avg[interval]) && !math.IsNaN(inc) { - // When we have a proper value in the span we need to initialize to 0 - s.avg[interval] = 0 - s.count[interval] = 0 - } - s.count[interval]++ - mean, c := averageInc(s.avg[interval], inc, s.count[interval], s.compensation[interval]) - s.avg[interval] = mean - s.compensation[interval] = c -} - -func averageInc(mean, inc, count, compensation 
float64) (float64, float64) { - if math.IsInf(mean, 0) { - if math.IsInf(inc, 0) && (mean > 0) == (inc > 0) { - // The `current.val` and `new` values are `Inf` of the same sign. They - // can't be subtracted, but the value of `current.val` is correct - // already. - return mean, compensation - } - if !math.IsInf(inc, 0) && !math.IsNaN(inc) { - // At this stage, the current.val is an infinite. If the added - // value is neither an Inf or a Nan, we can keep that mean - // value. - // This is required because our calculation below removes - // the mean value, which would look like Inf += x - Inf and - // end up as a NaN. - return mean, compensation - } - } - mean, c := kahanSumInc(inc/count-mean/count, mean, compensation) - return mean, c -} - -func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { - t := sum + inc - switch { - case math.IsInf(t, 0): - c = 0 - - // Using Neumaier improvement, swap if next term larger than sum. - case math.Abs(sum) >= math.Abs(inc): - c += (sum - t) + inc - default: - c += (inc - t) + sum - } - return t, c + s.average.addMeanIncrement(interval, inc) } func (g *avgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float64, ts uint64) { @@ -491,7 +470,7 @@ func (g *avgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float lbls = append(lbls, Label{k.String(), v}) } - s.exemplars = append(s.exemplars, Exemplar{ + s.average.Exemplars = append(s.average.Exemplars, Exemplar{ Labels: lbls, Value: value, TimestampMs: ts, @@ -499,13 +478,10 @@ func (g *avgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float g.series[g.buf.fast] = s } -func (g *avgOverTimeSpanAggregator[F, S]) labelsFor(vals S, t string) (Labels, string) { +func (g *avgOverTimeSpanAggregator[F, S]) labelsFor(vals S) (Labels, string) { if g.by == nil { serieLabel := make(Labels, 1, 2) serieLabel[0] = Label{labels.MetricName, NewStaticString(metricsAggregateAvgOverTime.String())} - if t != "" { - serieLabel = append(serieLabel, 
Label{internalLabelMetaType, NewStaticString(t)}) - } return serieLabel, serieLabel.String() } labels := make(Labels, 0, len(g.by)+1) @@ -521,10 +497,6 @@ func (g *avgOverTimeSpanAggregator[F, S]) labelsFor(vals S, t string) (Labels, s labels = append(labels, Label{g.by[0].String(), NewStaticNil()}) } - if t != "" { - labels = append(labels, Label{internalLabelMetaType, NewStaticString(t)}) - } - return labels, labels.String() } @@ -532,24 +504,15 @@ func (g *avgOverTimeSpanAggregator[F, S]) Series() SeriesSet { ss := SeriesSet{} for _, s := range g.series { - // First, get the regular series - labels, promLabelsAvg := g.labelsFor(s.vals, "") - // Include the compensation at the end - for i := range s.avg { - s.avg[i] = s.avg[i] + s.compensation[i] - } - ss[promLabelsAvg] = TimeSeries{ - Labels: labels, - Values: s.avg, - Exemplars: s.exemplars, - } - // Second, get the "count" series - labels, promLabelsCount := g.labelsFor(s.vals, internalMetaTypeCount) - ss[promLabelsCount] = TimeSeries{ - Labels: labels, - Values: s.count, - Exemplars: []Exemplar{}, - } + labels, promLabelsAvg := g.labelsFor(s.vals) + s.average.labels = labels + // Average series + averageSeries := s.average.getAvgSeries() + // Count series + countSeries := s.average.getCountSeries() + + ss[promLabelsAvg] = averageSeries + ss[countSeries.Labels.String()] = countSeries } return ss @@ -566,7 +529,7 @@ func (g *avgOverTimeSpanAggregator[F, S]) getSeries(span Span) avgOverTimeSeries } // Fast path - if g.lastSeries.init && g.lastBuf.fast == g.buf.fast { + if g.lastBuf.fast == g.buf.fast && g.lastSeries.initialized { return g.lastSeries } @@ -574,19 +537,11 @@ func (g *avgOverTimeSpanAggregator[F, S]) getSeries(span Span) avgOverTimeSeries if !ok { intervals := IntervalCount(g.start, g.end, g.step) s = avgOverTimeSeries[S]{ - init: true, vals: g.buf.vals, - count: make([]float64, intervals), - avg: make([]float64, intervals), - compensation: make([]float64, intervals), - exemplars: make([]Exemplar, 
0, maxExemplars), + average: newAverageSeries(intervals, maxExemplars, nil), exemplarBuckets: newBucketSet(intervals), + initialized: true, } - for i := 0; i < intervals; i++ { - s.avg[i] = math.Float64frombits(normalNaN) - s.count[i] = math.Float64frombits(normalNaN) - } - g.series[g.buf.fast] = s } From f5e9c71fe73e31e1e0cf84f9980c27cc03858b96 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Wed, 30 Oct 2024 14:54:46 +0100 Subject: [PATCH 22/29] some improvements --- pkg/traceql/engine_metrics_average.go | 57 ++++++++++++++------------- 1 file changed, 30 insertions(+), 27 deletions(-) diff --git a/pkg/traceql/engine_metrics_average.go b/pkg/traceql/engine_metrics_average.go index 7afd69259d8..0b6d934a7cd 100644 --- a/pkg/traceql/engine_metrics_average.go +++ b/pkg/traceql/engine_metrics_average.go @@ -39,7 +39,6 @@ func (a *averageOverTimeAggregator) init(q *tempopb.QueryRangeRequest, mode Aggr a.seriesAgg = &averageOverTimeSeriesAggregator{ weightedAverageSeries: make(map[string]averageSeries), - averageToCountMapper: make(map[string]string), len: IntervalCount(q.Start, q.End, q.Step), start: q.Start, end: q.End, @@ -140,30 +139,36 @@ func (a *averageOverTimeAggregator) String() string { type averageOverTimeSeriesAggregator struct { weightedAverageSeries map[string]averageSeries - // mapping between average series with their weights - averageToCountMapper map[string]string - len int - start, end, step uint64 - exemplarBuckets *bucketSet + len int + start, end, step uint64 + exemplarBuckets *bucketSet } type averageValue struct { - sum float64 + mean float64 compensation float64 weight float64 } +// Adds an increment to the existing mean using Kahan sumnmation algorithm. 
+// The compensation is accumulated and not applied to reduce the error func (a *averageValue) add(inc float64) { - if math.IsInf(a.sum, 0) { - if math.IsInf(inc, 0) && (a.sum > 0) == (inc > 0) { + if math.IsInf(a.mean, 0) { + if math.IsInf(inc, 0) && (a.mean > 0) == (inc > 0) { + // The `mean` and `ic` values are `Inf` of the same sign. They + // can't be subtracted, but the value of `mean` is correct + // already. return } if !math.IsInf(inc, 0) && !math.IsNaN(inc) { + // At this stage, the mean is an infinite. If the added + // value is neither an Inf or a Nan, we can keep that mean + // value. return } } - val, c := kahanSumInc(inc, a.sum, a.compensation) - a.sum = val + val, c := kahanSumInc(inc, a.mean, a.compensation) + a.mean = val a.compensation = c } @@ -194,9 +199,9 @@ func newAverageSeries(len int, lenExemplars int, labels Labels) averageSeries { labels: labels, Exemplars: make([]Exemplar, 0, lenExemplars), } - + // Init to nan to discriminate uninitialized values from 0 for i := range s.values { - s.values[i].sum = nan + s.values[i].mean = nan s.values[i].weight = nan } return s @@ -211,7 +216,7 @@ func (k *averageSeries) getAvgSeries() TimeSeries { } for i, v := range k.values { - ts.Values[i] = v.sum + v.compensation + ts.Values[i] = v.mean + v.compensation } return ts } @@ -228,15 +233,15 @@ func (k *averageSeries) getCountSeries() TimeSeries { return ts } -// It increments the average -func (k *averageSeries) addMeanIncrement(interval int, inc float64) { +// It increments the mean based on a new value +func (k *averageSeries) addIncrementMean(interval int, inc float64) { currentMean := k.values[interval] - if math.IsNaN(currentMean.sum) && !math.IsNaN(inc) { - k.values[interval] = averageValue{sum: inc, weight: 1} + if math.IsNaN(currentMean.mean) && !math.IsNaN(inc) { + k.values[interval] = averageValue{mean: inc, weight: 1} return } currentMean.weight++ - currentMean.add(inc/currentMean.weight - currentMean.sum/currentMean.weight) + 
currentMean.add(inc/currentMean.weight - currentMean.mean/currentMean.weight) k.values[interval] = currentMean } @@ -244,18 +249,16 @@ func (k *averageSeries) addMeanIncrement(interval int, inc float64) { // By adding incremental values we prevent overflow func (k *averageSeries) addWeigthedMean(interval int, mean float64, weight float64) { currentMean := k.values[interval] - if math.IsNaN(currentMean.sum) && !math.IsNaN(mean) { - k.values[interval] = averageValue{sum: mean, weight: weight} + if math.IsNaN(currentMean.mean) && !math.IsNaN(mean) { + k.values[interval] = averageValue{mean: mean, weight: weight} return } sumWeights := currentMean.weight + weight - // Using a delta increment to avoid overflow - meanDelta := ((mean - currentMean.sum) * weight) / sumWeights + meanDelta := ((mean - currentMean.mean) * weight) / sumWeights meanDelta -= currentMean.compensation currentMean.add(meanDelta) - k.values[interval] = currentMean } @@ -265,7 +268,7 @@ var ( ) func (b *averageOverTimeSeriesAggregator) Combine(in []*tempopb.TimeSeries) { - // We traverse the TimeSeries first to initialize and map the count series + // We traverse the TimeSeries to initialize new TimeSeries and map the counter series with the position in the `in` array countPosMapper := make(map[string]int, len(in)/2) for i, ts := range in { _, ok := b.weightedAverageSeries[ts.PromLabels] @@ -282,7 +285,7 @@ func (b *averageOverTimeSeriesAggregator) Combine(in []*tempopb.TimeSeries) { for _, ts := range in { existing, ok := b.weightedAverageSeries[ts.PromLabels] if !ok { - // This is a counter label, we can skip it + // This is a counter series, we can skip it continue } for i, sample := range ts.Samples { @@ -451,7 +454,7 @@ func (g *avgOverTimeSpanAggregator[F, S]) Observe(span Span) { } s := g.getSeries(span) - s.average.addMeanIncrement(interval, inc) + s.average.addIncrementMean(interval, inc) } func (g *avgOverTimeSpanAggregator[F, S]) ObserveExemplar(span Span, value float64, ts uint64) { From 
9e94c2635c531159b219315dd0da4c46dd9f98ae Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Wed, 30 Oct 2024 15:43:34 +0100 Subject: [PATCH 23/29] fix bug with nil string --- pkg/traceql/engine_metrics.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/traceql/engine_metrics.go b/pkg/traceql/engine_metrics.go index 44f4de6cffe..7c34f51bb8e 100644 --- a/pkg/traceql/engine_metrics.go +++ b/pkg/traceql/engine_metrics.go @@ -178,10 +178,12 @@ func (ls Labels) String() string { promValue = "" case l.Value.Type == TypeString: s := l.Value.EncodeToString(false) - if s != "" { - promValue = s - } else { + if s == "nil" { + promValue = "" + } else if s == "" { promValue = "" + } else { + promValue = s } default: promValue = l.Value.EncodeToString(false) From 350ac4073fdf9e8361b9fffd496b1210af2708b2 Mon Sep 17 00:00:00 2001 From: javiermolinar Date: Thu, 31 Oct 2024 16:53:00 +0100 Subject: [PATCH 24/29] doc changes --- .../tempo/traceql/metrics-queries/_index.md | 2 +- .../traceql/metrics-queries/functions.md | 22 ++++++++++++++++--- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/docs/sources/tempo/traceql/metrics-queries/_index.md b/docs/sources/tempo/traceql/metrics-queries/_index.md index 9f4c2a2fe77..ece2a2e0486 100644 --- a/docs/sources/tempo/traceql/metrics-queries/_index.md +++ b/docs/sources/tempo/traceql/metrics-queries/_index.md @@ -60,7 +60,7 @@ Refer to [Solve problems using metrics queries](./solve-problems-metrics-queries ### Functions -TraceQL metrics queries currently include the following functions for aggregating over groups of spans: `rate`, `count_over_time`, `quantile_over_time`, `histogram_over_time`, and `compare`. +TraceQL metrics queries currently include the following functions for aggregating over groups of spans: `rate`, `count_over_time`,`max_over_time`,`min_over_time`,`avg_over_time`, `quantile_over_time`, `histogram_over_time`, and `compare`. 
These functions can be added as an operator at the end of any TraceQL query. For detailed information and example queries for each function, refer to [TraceQL metrics functions](./functions). diff --git a/docs/sources/tempo/traceql/metrics-queries/functions.md b/docs/sources/tempo/traceql/metrics-queries/functions.md index 2c20a34674f..01de1280265 100644 --- a/docs/sources/tempo/traceql/metrics-queries/functions.md +++ b/docs/sources/tempo/traceql/metrics-queries/functions.md @@ -12,7 +12,7 @@ keywords: -TraceQL supports `rate`, `count_over_time`, `quantile_over_time`, `histogram_over_time`, and `compare` functions. +TraceQL supports `rate`, `count_over_time`, `min_over_time`, `max_over_time`, `avg_over_time`, `quantile_over_time`, `histogram_over_time`, and `compare` functions. ## Available functions @@ -30,6 +30,9 @@ These functions can be added as an operator at the end of any TraceQL query. `max_over_time` : Returns the minimum value for the specified attribute across all matching spans per time interval (refer to the [`step` API parameter](https://grafana.com/docs/tempo//api_docs/#traceql-metrics)). +`avg_over_time` +: Returns the average value for the specified attribute across all matching spans per time interval (refer to the [`step` API parameter](https://grafana.com/docs/tempo//api_docs/#traceql-metrics)). + `quantile_over_time` : The quantile of the values in the specified interval @@ -94,7 +97,7 @@ This example counts the number of spans with name `"GET /:endpoint"` broken down ``` -## The `min_over_time` and `max_over_time` functions +## The `min_over_time`, `max_over_time` and `avg_over_time` functions The `min_over_time()` function lets you aggregate numerical attributes by calculating their minimum value. For example, you could choose to calculate the minimum duration of a group of spans, or you could choose to calculate the minimum value of a custom attribute you've attached to your spans, like `span.shopping.cart.entries`.
@@ -103,11 +106,14 @@ The time interval that the minimum is computed over is set by the `step` paramet The `max_over_time()` let you aggregate numerical values by computing the maximum value of them, such as the all important span duration. The time interval that the maximum is computer over is set by the `step` parameter. +The `avg_over_time()` let you aggregate numerical values by computing the maximum value of them, such as the all important span duration. +The time interval that the maximum is computer over is set by the `step` parameter. + For more information, refer to the [`step` API parameter](https://grafana.com/docs/tempo//api_docs/#traceql-metrics). ### Parameters -Numerical field that you want to calculate the minimum or maximum of. +Numerical field that you want to calculate the minimum, maximum or average of. ### Examples @@ -134,6 +140,16 @@ This example computes the maximum duration for each `http.target` of all spans n { name = "GET /:endpoint" } | max_over_time(span.http.response.size) ``` +This example computes the average duration for each `http.status_code` of all spans named `"GET /:endpoint"`. + +``` +{ name = "GET /:endpoint" } | avg_over_time(duration) by (span.http.status_code) +``` + +``` +{ name = "GET /:endpoint" } | avg_over_time(span.http.response.size) +``` + ## The `quantile_over_time` and `histogram_over_time` functions The `quantile_over_time()` and `histogram_over_time()` functions let you aggregate numerical values, such as the all important span duration. 
From ab24abce9ea00ed850928f5bc465076ec09ca681 Mon Sep 17 00:00:00 2001 From: Javier Molina Reyes Date: Thu, 31 Oct 2024 17:09:12 +0100 Subject: [PATCH 25/29] Update docs/sources/tempo/traceql/metrics-queries/functions.md Co-authored-by: Kim Nylander <104772500+knylander-grafana@users.noreply.github.com> --- docs/sources/tempo/traceql/metrics-queries/functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/tempo/traceql/metrics-queries/functions.md b/docs/sources/tempo/traceql/metrics-queries/functions.md index 01de1280265..5af943d8c35 100644 --- a/docs/sources/tempo/traceql/metrics-queries/functions.md +++ b/docs/sources/tempo/traceql/metrics-queries/functions.md @@ -97,7 +97,7 @@ This example counts the number of spans with name `"GET /:endpoint"` broken down ``` -## The `min_over_time`, `max_over_time` and `avg_over_time` functions +## The `min_over_time`, `max_over_time`, and `avg_over_time` functions The `min_over_time()` function lets you aggregate numerical attributes by calculating their minimum value. For example, you could choose to calculate the minimum duration of a group of spans, or you could choose to calculate the minimum value of a custom attribute you've attached to your spans, like `span.shopping.cart.entries`. 
From 2c104d5f9be7db4b97721cb53b6c83218937100c Mon Sep 17 00:00:00 2001 From: Javier Molina Reyes Date: Thu, 31 Oct 2024 17:09:31 +0100 Subject: [PATCH 26/29] Update docs/sources/tempo/traceql/metrics-queries/functions.md Co-authored-by: Kim Nylander <104772500+knylander-grafana@users.noreply.github.com> --- docs/sources/tempo/traceql/metrics-queries/functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/tempo/traceql/metrics-queries/functions.md b/docs/sources/tempo/traceql/metrics-queries/functions.md index 5af943d8c35..d53ee0cdcfa 100644 --- a/docs/sources/tempo/traceql/metrics-queries/functions.md +++ b/docs/sources/tempo/traceql/metrics-queries/functions.md @@ -106,7 +106,7 @@ The time interval that the minimum is computed over is set by the `step` paramet The `max_over_time()` let you aggregate numerical values by computing the maximum value of them, such as the all important span duration. The time interval that the maximum is computer over is set by the `step` parameter. -The `avg_over_time()` let you aggregate numerical values by computing the maximum value of them, such as the all important span duration. +The `avg_over_time()` function lets you aggregate numerical values by computing the average value of them, such as the all important span duration. The time interval that the maximum is computer over is set by the `step` parameter. For more information, refer to the [`step` API parameter](https://grafana.com/docs/tempo//api_docs/#traceql-metrics). 
From 80e36d97fc64b071aa00ce3740a92696556b3fcf Mon Sep 17 00:00:00 2001 From: Javier Molina Reyes Date: Thu, 31 Oct 2024 17:09:45 +0100 Subject: [PATCH 27/29] Update docs/sources/tempo/traceql/metrics-queries/functions.md Co-authored-by: Kim Nylander <104772500+knylander-grafana@users.noreply.github.com> --- docs/sources/tempo/traceql/metrics-queries/functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/tempo/traceql/metrics-queries/functions.md b/docs/sources/tempo/traceql/metrics-queries/functions.md index d53ee0cdcfa..cce59c97458 100644 --- a/docs/sources/tempo/traceql/metrics-queries/functions.md +++ b/docs/sources/tempo/traceql/metrics-queries/functions.md @@ -113,7 +113,7 @@ For more information, refer to the [`step` API parameter](https://grafana.com/do ### Parameters -Numerical field that you want to calculate the minimum, maximum or average of. +Numerical field that you want to calculate the minimum, maximum, or average of. ### Examples From 924b27ba882fd6d118a1b0e161bfc33ddca53d4c Mon Sep 17 00:00:00 2001 From: Javier Molina Reyes Date: Thu, 31 Oct 2024 17:09:54 +0100 Subject: [PATCH 28/29] Update docs/sources/tempo/traceql/metrics-queries/_index.md Co-authored-by: Kim Nylander <104772500+knylander-grafana@users.noreply.github.com> --- docs/sources/tempo/traceql/metrics-queries/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/tempo/traceql/metrics-queries/_index.md b/docs/sources/tempo/traceql/metrics-queries/_index.md index ece2a2e0486..817fd5dd005 100644 --- a/docs/sources/tempo/traceql/metrics-queries/_index.md +++ b/docs/sources/tempo/traceql/metrics-queries/_index.md @@ -60,7 +60,7 @@ Refer to [Solve problems using metrics queries](./solve-problems-metrics-queries ### Functions -TraceQL metrics queries currently include the following functions for aggregating over groups of spans: `rate`, `count_over_time`,`max_over_time`,`min_over_time`,`avg_over_time`, 
`quantile_over_time`, `histogram_over_time`, and `compare`. +TraceQL metrics queries currently include the following functions for aggregating over groups of spans: `rate`, `count_over_time`, `max_over_time`, `min_over_time`, `avg_over_time`, `quantile_over_time`, `histogram_over_time`, and `compare`. These functions can be added as an operator at the end of any TraceQL query. For detailed information and example queries for each function, refer to [TraceQL metrics functions](./functions). From 057299cd295146b9b437c6e8529986bbc62d51ec Mon Sep 17 00:00:00 2001 From: Javier Molina Reyes Date: Thu, 31 Oct 2024 17:10:44 +0100 Subject: [PATCH 29/29] Update docs/sources/tempo/traceql/metrics-queries/functions.md Co-authored-by: Kim Nylander <104772500+knylander-grafana@users.noreply.github.com> --- docs/sources/tempo/traceql/metrics-queries/functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/tempo/traceql/metrics-queries/functions.md b/docs/sources/tempo/traceql/metrics-queries/functions.md index cce59c97458..392d2c92f20 100644 --- a/docs/sources/tempo/traceql/metrics-queries/functions.md +++ b/docs/sources/tempo/traceql/metrics-queries/functions.md @@ -12,7 +12,7 @@ keywords: -TraceQL supports `rate`, `count_over_time`,`min_over_time`,`avg_over_time`, `quantile_over_time`, `histogram_over_time`, and `compare` functions. +TraceQL supports `rate`, `count_over_time`, `min_over_time`, `avg_over_time`, `quantile_over_time`, `histogram_over_time`, and `compare` functions. ## Available functions