POC: export profile metrics at compaction time #3718

Open
wants to merge 1 commit into base: main
607 changes: 428 additions & 179 deletions api/gen/proto/go/query/v1/query.pb.go

Large diffs are not rendered by default.

1,024 changes: 963 additions & 61 deletions api/gen/proto/go/query/v1/query_vtproto.pb.go

Large diffs are not rendered by default.

51 changes: 48 additions & 3 deletions api/openapiv2/gen/phlare.swagger.json
@@ -951,6 +951,17 @@
}
}
},
"v1FunctionList": {
"type": "object",
"properties": {
"functions": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
"v1GetBlockStatsResponse": {
"type": "object",
"properties": {
@@ -1435,6 +1446,32 @@
}
}
},
"v1MetricsQuery": {
"type": "object",
"properties": {
"functionsByServiceName": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/v1FunctionList"
}
}
}
},
"v1MetricsReport": {
"type": "object",
"properties": {
"query": {
"$ref": "#/definitions/v1MetricsQuery"
},
"timeSeries": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/v1Series"
}
}
}
},
"v1Point": {
"type": "object",
"properties": {
@@ -1568,7 +1605,10 @@
"$ref": "#/definitions/v1TreeQuery"
},
"pprof": {
"$ref": "#/definitions/v1PprofQuery",
"$ref": "#/definitions/v1PprofQuery"
},
"metrics": {
"$ref": "#/definitions/v1MetricsQuery",
"description": "function_details\n call_graph\n top_table\n ..."
}
}
@@ -1704,7 +1744,8 @@
"QUERY_SERIES_LABELS",
"QUERY_TIME_SERIES",
"QUERY_TREE",
"QUERY_PPROF"
"QUERY_PPROF",
"QUERY_METRICS"
],
"default": "QUERY_UNSPECIFIED"
},
@@ -1769,6 +1810,9 @@
},
"pprof": {
"$ref": "#/definitions/v1PprofReport"
},
"metrics": {
"$ref": "#/definitions/v1MetricsReport"
}
}
},
@@ -1781,7 +1825,8 @@
"REPORT_SERIES_LABELS",
"REPORT_TIME_SERIES",
"REPORT_TREE",
"REPORT_PPROF"
"REPORT_PPROF",
"REPORT_METRICS"
],
"default": "REPORT_UNSPECIFIED"
},
17 changes: 17 additions & 0 deletions api/query/v1/query.proto
@@ -83,6 +83,7 @@ message Query {
TimeSeriesQuery time_series = 5;
TreeQuery tree = 6;
PprofQuery pprof = 7;
MetricsQuery metrics = 8;
Review comment (Collaborator): I think we can implement the feature without extending the query API.

// function_details
// call_graph
// top_table
@@ -97,6 +98,7 @@ enum QueryType {
QUERY_TIME_SERIES = 4;
QUERY_TREE = 5;
QUERY_PPROF = 6;
QUERY_METRICS = 7;
}

message InvokeResponse {
@@ -119,6 +121,7 @@ message Report {
TimeSeriesReport time_series = 5;
TreeReport tree = 6;
PprofReport pprof = 7;
MetricsReport metrics = 8;
}

enum ReportType {
@@ -129,6 +132,7 @@ enum ReportType {
REPORT_TIME_SERIES = 4;
REPORT_TREE = 5;
REPORT_PPROF = 6;
REPORT_METRICS = 7;
}

message LabelNamesQuery {}
@@ -186,3 +190,16 @@ message PprofReport {
PprofQuery query = 1;
bytes pprof = 2;
}

message MetricsQuery {
map<string, FunctionList> functionsByServiceName = 1;
}

message MetricsReport {
MetricsQuery query = 1;
repeated types.v1.Series time_series = 2;
}

message FunctionList {
repeated string functions = 1;
}
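
For orientation, here is a minimal sketch of how a caller might populate the new messages and read a report back. It assumes the generated Go code follows the repository's existing protoc-gen-go conventions (a queryv1 package with MetricsQuery, FunctionList, a Query.QueryType field, Report.Metrics, and the usual getters); the import path, field names, and the example service and function names are illustrative assumptions, not taken from this diff.

package example

import (
	"fmt"

	queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1"
)

// buildMetricsQuery requests per-function metrics for a single service.
func buildMetricsQuery() *queryv1.Query {
	return &queryv1.Query{
		QueryType: queryv1.QueryType_QUERY_METRICS,
		Metrics: &queryv1.MetricsQuery{
			FunctionsByServiceName: map[string]*queryv1.FunctionList{
				// Hypothetical service and function names.
				"ride-sharing-app": {Functions: []string{"main.FindNearestVehicle"}},
			},
		},
	}
}

// printMetricsReport walks the time series returned for the query above.
func printMetricsReport(r *queryv1.Report) {
	if r.GetMetrics() == nil {
		return
	}
	for _, s := range r.GetMetrics().GetTimeSeries() {
		fmt.Println(s.GetLabels(), len(s.GetPoints()))
	}
}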
165 changes: 165 additions & 0 deletions pkg/distributor/distributor.go
@@ -9,7 +9,9 @@ import (
"fmt"
"hash/fnv"
"net/http"
slices2 "slices"
"sort"
"strconv"
"sync"
"time"

@@ -341,10 +343,152 @@ func (d *Distributor) PushParsed(ctx context.Context, req *distributormodel.Push
}
}

//var requestId = uuid.New()
if req.TotalProfiles == 0 {
return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("no profiles received"))
}
for _, series := range req.Series {
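// POC filter: label indices 7 and 5 are assumed to hold service_name and region in this test setup.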
if series.Labels[7].Value != "ride-sharing-app" || series.Labels[5].Value != "ap-south" {
continue
}
if len(series.Samples) > 1 {
_ = level.Error(d.logger).Log("msg", "multiple samples in series; skipping")
continue
}
var dict = series.Samples[0].Profile.Profile.StringTable
level.Info(d.logger).Log("msg", "processing new profile samples")
var profile = series.Samples[0].Profile.Profile
for num, sample := range profile.Sample {
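// Resolve the sample's call stack into readable frames: location and function IDs are treated as
// 1-based indices into profile.Location and profile.Function, and names are looked up in the string table.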
var stacktraces string
for _, locationId := range sample.LocationId {
var location = profile.Location[locationId-1]
var stacktrace string
for _, line := range location.Line {
var function = profile.Function[line.FunctionId-1]
stacktrace += dict[function.Name] + ", " + dict[function.Filename] + " (" + strconv.FormatInt(function.StartLine, 10) + ")\n"
}
stacktraces += stacktrace + "\n\n"
}
var labels []LabelPair
for _, label := range sample.Label {
labels = append(labels, LabelPair{
Name: dict[label.Key],
Value: dict[label.Str],
})
}

var period = strconv.FormatInt(profile.Period, 10) + " " + dict[profile.PeriodType.Type] + " " + dict[profile.PeriodType.Unit]

var values string
for i, value := range sample.Value {
values += strconv.FormatInt(value, 10) + " " + dict[profile.SampleType[i].Unit] + " (" + dict[profile.SampleType[i].Type] + "), "
}
level.Info(d.logger).Log("msg", "",
"sample", num,
"labels", stringify(labels),
"stactrace", stacktraces,
"period", period,
"timeNanos", time.Unix(profile.TimeNanos/1000000000, 0),
"duration", profile.DurationNanos/1000000000.0,
"values", values,
)

}

// 8->9 (14) protobuf.go > varint
// 10->9 (27) protobuf.go > uint64
// 11->9 (60) protobuf.go > int64
// 12->9 (65) protobuf.go > int64Opt
// 13->14 (514) proto.go > emitLocation
// req.Series[0].Labels[7].Value == "ride-sharing-app"
// len(req.Series[0].Samples[0].Profile.Profile.Location[0].Line) > 2
//var seriesId = uuid.New()
/* req.Series[0].Samples[0].Profile.Period != 524288 && req.Series[0].Samples[0].Profile.Period != 10000000 && req.Series[0].Samples[0].Profile.Period != 1
var found = false
for _, label := range series.Labels {
if label.Name == phlaremodel.LabelNameServiceName && label.Value == "ride-sharing-app" {
found = true
}
}
if !found {
continue
}
println("Request ---------------------------------------------------------------")*/
/*var globalLabels []LabelPair
for _, label := range series.Labels {
globalLabels = append(globalLabels, LabelPair{
Name: label.Name,
Value: label.Value,
})
}
slices2.SortFunc(globalLabels, func(a, b LabelPair) int {
if a.Name < b.Name {
return -1
} else if a.Name > b.Name {
return 1
}
return 0
})
for _, sample := range series.Samples {
for _, sample2 := range sample.Profile.Profile.Sample {
var labels []LabelPair
for _, label := range sample2.Label {
labelId := label.Key
labelValue := label.Str
labels = append(labels, LabelPair{
Name: sample.Profile.Profile.StringTable[labelId],
Value: sample.Profile.Profile.StringTable[labelValue],
})
}

fmt.Print("requestId=", requestId, ", ", "seriesId=", seriesId, ", ")

for _, label := range globalLabels {
fmt.Print(label.Name, "=", label.Value, ", ")
}
slices2.SortFunc(labels, func(a, b LabelPair) int {
if a.Name < b.Name {
return -1
} else if a.Name > b.Name {
return 1
}
return 0
})
for _, label := range labels {
fmt.Print(label.Name, "=", label.Value, ", ")
}
fmt.Println()
}
}*/

/*var found = false
for _, label := range series.Labels {
if label.Name == phlaremodel.LabelNameServiceName && label.Value == "ride-sharing-app" {
found = true
}
}
if !found {
continue
}
vehicle := ""
for _, label := range series.Labels {
if label.Name == "vehicle" {
vehicle = label.Value
}
}
if vehicle == "" {
continue
}
region := ""
for _, label := range series.Labels {
if label.Name == "region" {
region = label.Value
}
}*/
//fmt.Println(region, vehicle)
/*for b, sample := range series.Samples {
}*/
}
// Normalisation is quite an expensive operation,
// therefore it should be done after the rate limit check.
for _, series := range req.Series {
@@ -380,6 +524,27 @@ func (d *Distributor) PushParsed(ctx context.Context, req *distributormodel.Push
return connect.NewResponse(&pushv1.PushResponse{}), nil
}

// stringify renders the label pairs, sorted by name, as "name=value, " for logging.
func stringify(s []LabelPair) string {
slices2.SortFunc(s, func(a, b LabelPair) int {
if a.Name < b.Name {
return -1
} else if a.Name > b.Name {
return 1
}
return 0
})
var result string
for _, label := range s {
result += label.Name + "=" + label.Value + ", "
}
return result
}

type LabelPair struct {
Name string
Value string
}

// If aggregation is configured for the tenant, we try to determine
// whether the profile is eligible for aggregation based on the series
// profile rate, and handle it asynchronously, if this is the case.
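
The sample loop added above leans on the pprof data model: a sample references locations by ID, a location's lines reference functions by ID, and every name is an index into the profile's string table. Below is a self-contained sketch of that resolution pattern; it uses simplified local types rather than the repository's generated profile types, so the struct and field names are illustrative only.

package main

import "fmt"

// Simplified stand-ins for the pprof messages the loop above walks.
type Function struct{ Name, Filename int64 } // indices into StringTable
type Line struct{ FunctionID uint64 }
type Location struct{ Lines []Line }
type Sample struct{ LocationIDs []uint64 }
type Profile struct {
	StringTable []string
	Functions   []Function
	Locations   []Location
	Samples     []Sample
}

// stackOf resolves one sample's call stack into "name (file)" frames,
// treating location and function IDs as 1-based slice indices, as the POC does.
func stackOf(p *Profile, s Sample) []string {
	var frames []string
	for _, locID := range s.LocationIDs {
		loc := p.Locations[locID-1]
		for _, ln := range loc.Lines {
			fn := p.Functions[ln.FunctionID-1]
			frames = append(frames, p.StringTable[fn.Name]+" ("+p.StringTable[fn.Filename]+")")
		}
	}
	return frames
}

func main() {
	p := &Profile{
		StringTable: []string{"", "main.work", "main.go"},
		Functions:   []Function{{Name: 1, Filename: 2}},
		Locations:   []Location{{Lines: []Line{{FunctionID: 1}}}},
		Samples:     []Sample{{LocationIDs: []uint64{1}}},
	}
	fmt.Println(stackOf(p, p.Samples[0])) // [main.work (main.go)]
}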