diff --git a/cmd/baton-vgs/config.go b/cmd/baton-vgs/config.go
deleted file mode 100644
index 810a841..0000000
--- a/cmd/baton-vgs/config.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package main
-
-import (
-	"context"
-	"errors"
-
-	"github.com/conductorone/baton-sdk/pkg/cli"
-	"github.com/spf13/cobra"
-)
-
-// config defines the external configuration required for the connector to run.
-type config struct {
-	cli.BaseConfig `mapstructure:",squash"` // Puts the base config options in the same place as the connector options
-	Vault                      string `mapstructure:"vault"`
-	ServiceAccountClientId     string `mapstructure:"service-account-client-id"`
-	ServiceAccountClientSecret string `mapstructure:"service-account-client-secret"`
-	OrganizationId             string `mapstructure:"organization-id"`
-}
-
-// validateConfig is run after the configuration is loaded, and should return an error if it isn't valid.
-func validateConfig(ctx context.Context, cfg *config) error {
-	if cfg.Vault == "" {
-		return errors.New("vault is required")
-	}
-
-	if cfg.ServiceAccountClientId == "" {
-		return errors.New("service-account-client-id is required")
-	}
-
-	if cfg.ServiceAccountClientSecret == "" {
-		return errors.New("service-account-client-secret is required")
-	}
-
-	if cfg.OrganizationId == "" {
-		return errors.New("organization-id is required")
-	}
-
-	return nil
-}
-
-// cmdFlags sets the cmdFlags required for the connector.
-func cmdFlags(cmd *cobra.Command) {
-	cmd.PersistentFlags().String("vault", "", "The VGS vault id. ($BATON_VAULT)")
-	cmd.PersistentFlags().String("service-account-client-id", "", "The VGS client id. ($BATON_SERVICE_ACCOUNT_CLIENT_ID)")
-	cmd.PersistentFlags().String("service-account-client-secret", "", "The VGS client secret. ($BATON_SERVICE_ACCOUNT_CLIENT_SECRET)")
-	cmd.PersistentFlags().String("organization-id", "", "The VGS organization id. ($BATON_ORGANIZATION_ID)")
-}
diff --git a/cmd/baton-vgs/main.go b/cmd/baton-vgs/main.go
index fbddc34..65f0067 100644
--- a/cmd/baton-vgs/main.go
+++ b/cmd/baton-vgs/main.go
@@ -5,30 +5,42 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/conductorone/baton-sdk/pkg/cli"
+	configSchema "github.com/conductorone/baton-sdk/pkg/config"
 	"github.com/conductorone/baton-sdk/pkg/connectorbuilder"
+	"github.com/conductorone/baton-sdk/pkg/field"
 	"github.com/conductorone/baton-sdk/pkg/types"
+	"github.com/conductorone/baton-vgs/pkg/connector"
 	"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
+	"github.com/spf13/viper"
 	"go.uber.org/zap"
+)
 
-	"github.com/conductorone/baton-vgs/pkg/connector"
+const (
+	version                    = "dev"
+	connectorName              = "baton-vgs"
+	serviceAccountClientId     = "service-account-client-id"
+	serviceAccountClientSecret = "service-account-client-secret"
+	organizationId             = "organization-id"
+	vault                      = "vault"
 )
 
-var version = "dev"
+var (
+	ServiceAccountClientId     = field.StringField(serviceAccountClientId, field.WithRequired(true), field.WithDescription("The VGS client id."))
+	ServiceAccountClientSecret = field.StringField(serviceAccountClientSecret, field.WithRequired(true), field.WithDescription("The VGS client secret."))
+	OrganizationId             = field.StringField(organizationId, field.WithRequired(true), field.WithDescription("The VGS organization id."))
+	Vault                      = field.StringField(vault, field.WithRequired(true), field.WithDescription("The VGS vault id."))
+	configurationFields        = []field.SchemaField{Vault, ServiceAccountClientId, ServiceAccountClientSecret, OrganizationId}
+)
 
 func main() {
 	ctx := context.Background()
-
-	cfg := &config{}
-	cmd, err := cli.NewCmd(ctx, "baton-vgs", cfg, validateConfig, getConnector)
+	_, cmd, err := configSchema.DefineConfiguration(ctx, connectorName, getConnector, field.NewConfiguration(configurationFields))
 	if err != nil {
 		fmt.Fprintln(os.Stderr, err.Error())
 		os.Exit(1)
 	}
 
 	cmd.Version = version
-	cmdFlags(cmd)
-
 	err = cmd.Execute()
 	if err != nil {
 		fmt.Fprintln(os.Stderr, err.Error())
@@ -36,13 +48,14 @@ func main() {
 	}
 }
 
-func getConnector(ctx context.Context, cfg *config) (types.ConnectorServer, error) {
+func getConnector(ctx context.Context, cfg *viper.Viper) (types.ConnectorServer, error) {
 	l := ctxzap.Extract(ctx)
 	cb, err := connector.New(ctx,
-		cfg.ServiceAccountClientId,
-		cfg.ServiceAccountClientSecret,
-		cfg.OrganizationId,
-		cfg.Vault)
+		cfg.GetString(serviceAccountClientId),
+		cfg.GetString(serviceAccountClientSecret),
+		cfg.GetString(organizationId),
+		cfg.GetString(vault),
+	)
 	if err != nil {
 		l.Error("error creating connector", zap.Error(err))
 		return nil, err
diff --git a/go.mod b/go.mod
index 27f3bc7..a2153f3 100644
--- a/go.mod
+++ b/go.mod
@@ -3,12 +3,12 @@ module github.com/conductorone/baton-vgs
 go 1.22.1
 
 require (
-	github.com/conductorone/baton-sdk v0.1.38
+	github.com/conductorone/baton-sdk v0.2.12
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
-	github.com/spf13/cobra v1.8.0
+	github.com/spf13/viper v1.18.2
 	github.com/stretchr/testify v1.9.0
 	go.uber.org/zap v1.27.0
-	golang.org/x/text v0.15.0
+	golang.org/x/text v0.16.0
 )
 
 require (
@@ -35,6 +35,7 @@ require (
 	github.com/aws/smithy-go v1.20.2 // indirect
 	github.com/benbjohnson/clock v1.3.5 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/deckarep/golang-set/v2 v2.6.0 // indirect
 	github.com/doug-martin/goqu/v9 v9.19.0 // indirect
 	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/envoyproxy/protoc-gen-validate v1.0.4 //
indirect @@ -66,20 +67,22 @@ require ( github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.18.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/otel v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/ratelimit v0.3.1 // indirect - golang.org/x/crypto v0.23.0 // indirect + golang.org/x/crypto v0.24.0 // indirect golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/net v0.25.0 // indirect + golang.org/x/net v0.26.0 // indirect golang.org/x/oauth2 v0.20.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.20.0 // indirect + golang.org/x/sys v0.21.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae // indirect google.golang.org/grpc v1.63.2 // indirect google.golang.org/protobuf v1.34.1 // indirect diff --git a/go.sum b/go.sum index f56bb14..d753a7c 100644 --- a/go.sum +++ b/go.sum @@ -50,13 +50,15 @@ github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/conductorone/baton-sdk v0.1.38 h1:KPFguiXeQZEtgA+g0CLzHduGe/1vrEkpVsM4XJqsqbE= -github.com/conductorone/baton-sdk v0.1.38/go.mod h1:yzfAXTvKzGJfhfrNTQ/Ipzt9+KPCzHrNO/Ro1gsxyDk= +github.com/conductorone/baton-sdk v0.2.12 h1:u5tbqpSgk/hTsZ8auwEQFqS8UxprK9k8yuJddY8hsfE= +github.com/conductorone/baton-sdk v0.2.12/go.mod h1:cg5FyUcJnD7xK5SPbHe/KNpwUVVlpHJ9rnmd3UwxSkU= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= +github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/doug-martin/goqu/v9 v9.19.0 h1:PD7t1X3tRcUiSdc5TEyOFKujZA5gs3VSA7wxSvBx7qo= github.com/doug-martin/goqu/v9 v9.19.0/go.mod h1:nf0Wc2/hV3gYK9LiyqIrzBEVGlI8qW3GuDCEobC4wBQ= @@ -78,6 +80,10 @@ github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7 github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr 
v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -133,6 +139,7 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -209,6 +216,18 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -230,8 +249,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 
h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= @@ -243,8 +262,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -257,8 +276,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -290,23 +309,23 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.20.0 
h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -318,8 +337,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement.pb.go new file mode 100644 index 0000000..7e53681 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement.pb.go @@ -0,0 +1,164 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc (unknown) +// source: c1/connector/v2/annotation_entitlement.proto + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EntitlementImmutable struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SourceId string `protobuf:"bytes,1,opt,name=source_id,json=sourceId,proto3" json:"source_id,omitempty"` + Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *EntitlementImmutable) Reset() { + *x = EntitlementImmutable{} + if protoimpl.UnsafeEnabled { + mi := &file_c1_connector_v2_annotation_entitlement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntitlementImmutable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementImmutable) ProtoMessage() {} + +func (x *EntitlementImmutable) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_entitlement_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntitlementImmutable.ProtoReflect.Descriptor instead. +func (*EntitlementImmutable) Descriptor() ([]byte, []int) { + return file_c1_connector_v2_annotation_entitlement_proto_rawDescGZIP(), []int{0} +} + +func (x *EntitlementImmutable) GetSourceId() string { + if x != nil { + return x.SourceId + } + return "" +} + +func (x *EntitlementImmutable) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + +var File_c1_connector_v2_annotation_entitlement_proto protoreflect.FileDescriptor + +var file_c1_connector_v2_annotation_entitlement_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, + 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x68, 0x0a, + 0x14, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x6d, 0x6d, 0x75, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x49, 0x64, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, + 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, + 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_c1_connector_v2_annotation_entitlement_proto_rawDescOnce sync.Once + 
file_c1_connector_v2_annotation_entitlement_proto_rawDescData = file_c1_connector_v2_annotation_entitlement_proto_rawDesc +) + +func file_c1_connector_v2_annotation_entitlement_proto_rawDescGZIP() []byte { + file_c1_connector_v2_annotation_entitlement_proto_rawDescOnce.Do(func() { + file_c1_connector_v2_annotation_entitlement_proto_rawDescData = protoimpl.X.CompressGZIP(file_c1_connector_v2_annotation_entitlement_proto_rawDescData) + }) + return file_c1_connector_v2_annotation_entitlement_proto_rawDescData +} + +var file_c1_connector_v2_annotation_entitlement_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_connector_v2_annotation_entitlement_proto_goTypes = []interface{}{ + (*EntitlementImmutable)(nil), // 0: c1.connector.v2.EntitlementImmutable + (*structpb.Struct)(nil), // 1: google.protobuf.Struct +} +var file_c1_connector_v2_annotation_entitlement_proto_depIdxs = []int32{ + 1, // 0: c1.connector.v2.EntitlementImmutable.metadata:type_name -> google.protobuf.Struct + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_entitlement_proto_init() } +func file_c1_connector_v2_annotation_entitlement_proto_init() { + if File_c1_connector_v2_annotation_entitlement_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_c1_connector_v2_annotation_entitlement_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntitlementImmutable); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_c1_connector_v2_annotation_entitlement_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_entitlement_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_entitlement_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_entitlement_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_entitlement_proto = out.File + file_c1_connector_v2_annotation_entitlement_proto_rawDesc = nil + file_c1_connector_v2_annotation_entitlement_proto_goTypes = nil + file_c1_connector_v2_annotation_entitlement_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement.pb.validate.go new file mode 100644 index 0000000..189bf53 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement.pb.validate.go @@ -0,0 +1,169 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: c1/connector/v2/annotation_entitlement.proto + +package v2 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on EntitlementImmutable with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EntitlementImmutable) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EntitlementImmutable with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EntitlementImmutableMultiError, or nil if none found. +func (m *EntitlementImmutable) ValidateAll() error { + return m.validate(true) +} + +func (m *EntitlementImmutable) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SourceId + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EntitlementImmutableValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EntitlementImmutableValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EntitlementImmutableValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return EntitlementImmutableMultiError(errors) + } + + return nil +} + +// EntitlementImmutableMultiError is an error wrapping multiple validation +// errors returned by EntitlementImmutable.ValidateAll() if the designated +// constraints aren't met. +type EntitlementImmutableMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EntitlementImmutableMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EntitlementImmutableMultiError) AllErrors() []error { return m } + +// EntitlementImmutableValidationError is the validation error returned by +// EntitlementImmutable.Validate if the designated constraints aren't met. +type EntitlementImmutableValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EntitlementImmutableValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EntitlementImmutableValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e EntitlementImmutableValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EntitlementImmutableValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EntitlementImmutableValidationError) ErrorName() string { + return "EntitlementImmutableValidationError" +} + +// Error satisfies the builtin error interface +func (e EntitlementImmutableValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEntitlementImmutable.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EntitlementImmutableValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EntitlementImmutableValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket.pb.go new file mode 100644 index 0000000..445884a --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket.pb.go @@ -0,0 +1,148 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc (unknown) +// source: c1/connector/v2/annotation_external_ticket.proto + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ExternalTicketSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *ExternalTicketSettings) Reset() { + *x = ExternalTicketSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_c1_connector_v2_annotation_external_ticket_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExternalTicketSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalTicketSettings) ProtoMessage() {} + +func (x *ExternalTicketSettings) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_external_ticket_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExternalTicketSettings.ProtoReflect.Descriptor instead. 
+func (*ExternalTicketSettings) Descriptor() ([]byte, []int) { + return file_c1_connector_v2_annotation_external_ticket_proto_rawDescGZIP(), []int{0} +} + +func (x *ExternalTicketSettings) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +var File_c1_connector_v2_annotation_external_ticket_proto protoreflect.FileDescriptor + +var file_c1_connector_v2_annotation_external_ticket_proto_rawDesc = []byte{ + 0x0a, 0x30, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, + 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x2e, 0x76, 0x32, 0x22, 0x32, 0x0a, 0x16, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x54, + 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, + 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, + 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_c1_connector_v2_annotation_external_ticket_proto_rawDescOnce sync.Once + file_c1_connector_v2_annotation_external_ticket_proto_rawDescData = file_c1_connector_v2_annotation_external_ticket_proto_rawDesc +) + +func file_c1_connector_v2_annotation_external_ticket_proto_rawDescGZIP() []byte { + file_c1_connector_v2_annotation_external_ticket_proto_rawDescOnce.Do(func() { + file_c1_connector_v2_annotation_external_ticket_proto_rawDescData = protoimpl.X.CompressGZIP(file_c1_connector_v2_annotation_external_ticket_proto_rawDescData) + }) + return file_c1_connector_v2_annotation_external_ticket_proto_rawDescData +} + +var file_c1_connector_v2_annotation_external_ticket_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_connector_v2_annotation_external_ticket_proto_goTypes = []interface{}{ + (*ExternalTicketSettings)(nil), // 0: c1.connector.v2.ExternalTicketSettings +} +var file_c1_connector_v2_annotation_external_ticket_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_external_ticket_proto_init() } +func file_c1_connector_v2_annotation_external_ticket_proto_init() { + if File_c1_connector_v2_annotation_external_ticket_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_c1_connector_v2_annotation_external_ticket_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalTicketSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_c1_connector_v2_annotation_external_ticket_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_external_ticket_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_external_ticket_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_external_ticket_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_external_ticket_proto = out.File + file_c1_connector_v2_annotation_external_ticket_proto_rawDesc = nil + file_c1_connector_v2_annotation_external_ticket_proto_goTypes = nil + file_c1_connector_v2_annotation_external_ticket_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket.pb.validate.go new file mode 100644 index 0000000..abe6a36 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket.pb.validate.go @@ -0,0 +1,140 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: c1/connector/v2/annotation_external_ticket.proto + +package v2 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ExternalTicketSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ExternalTicketSettings) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ExternalTicketSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ExternalTicketSettingsMultiError, or nil if none found. +func (m *ExternalTicketSettings) ValidateAll() error { + return m.validate(true) +} + +func (m *ExternalTicketSettings) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Enabled + + if len(errors) > 0 { + return ExternalTicketSettingsMultiError(errors) + } + + return nil +} + +// ExternalTicketSettingsMultiError is an error wrapping multiple validation +// errors returned by ExternalTicketSettings.ValidateAll() if the designated +// constraints aren't met. +type ExternalTicketSettingsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExternalTicketSettingsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExternalTicketSettingsMultiError) AllErrors() []error { return m } + +// ExternalTicketSettingsValidationError is the validation error returned by +// ExternalTicketSettings.Validate if the designated constraints aren't met. 
+type ExternalTicketSettingsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ExternalTicketSettingsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ExternalTicketSettingsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ExternalTicketSettingsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ExternalTicketSettingsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ExternalTicketSettingsValidationError) ErrorName() string { + return "ExternalTicketSettingsValidationError" +} + +// Error satisfies the builtin error interface +func (e ExternalTicketSettingsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sExternalTicketSettings.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ExternalTicketSettingsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ExternalTicketSettingsValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.go index 7957869..725f84a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.go @@ -131,6 +131,61 @@ func (x *GrantExpandable) GetResourceTypeIds() []string { return nil } +type GrantImmutable struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SourceId string `protobuf:"bytes,1,opt,name=source_id,json=sourceId,proto3" json:"source_id,omitempty"` + Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *GrantImmutable) Reset() { + *x = GrantImmutable{} + if protoimpl.UnsafeEnabled { + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrantImmutable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantImmutable) ProtoMessage() {} + +func (x *GrantImmutable) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrantImmutable.ProtoReflect.Descriptor instead. 
+func (*GrantImmutable) Descriptor() ([]byte, []int) { + return file_c1_connector_v2_annotation_grant_proto_rawDescGZIP(), []int{2} +} + +func (x *GrantImmutable) GetSourceId() string { + if x != nil { + return x.SourceId + } + return "" +} + +func (x *GrantImmutable) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + var File_c1_connector_v2_annotation_grant_proto protoreflect.FileDescriptor var file_c1_connector_v2_annotation_grant_proto_rawDesc = []byte{ @@ -152,10 +207,17 @@ var file_c1_connector_v2_annotation_grant_proto_rawDesc = []byte{ 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x49, 0x64, 0x73, - 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, - 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, - 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x22, 0x62, 0x0a, 0x0e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x49, 0x6d, 0x6d, 0x75, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, + 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, + 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -170,19 +232,21 @@ func file_c1_connector_v2_annotation_grant_proto_rawDescGZIP() []byte { return file_c1_connector_v2_annotation_grant_proto_rawDescData } -var file_c1_connector_v2_annotation_grant_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_c1_connector_v2_annotation_grant_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_c1_connector_v2_annotation_grant_proto_goTypes = []interface{}{ (*GrantMetadata)(nil), // 0: c1.connector.v2.GrantMetadata (*GrantExpandable)(nil), // 1: c1.connector.v2.GrantExpandable - (*structpb.Struct)(nil), // 2: google.protobuf.Struct + (*GrantImmutable)(nil), // 2: c1.connector.v2.GrantImmutable + (*structpb.Struct)(nil), // 3: google.protobuf.Struct } var file_c1_connector_v2_annotation_grant_proto_depIdxs = []int32{ - 2, // 0: c1.connector.v2.GrantMetadata.metadata:type_name -> google.protobuf.Struct - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 3, // 0: c1.connector.v2.GrantMetadata.metadata:type_name -> google.protobuf.Struct + 3, // 1: 
c1.connector.v2.GrantImmutable.metadata:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_c1_connector_v2_annotation_grant_proto_init() } @@ -215,6 +279,18 @@ func file_c1_connector_v2_annotation_grant_proto_init() { return nil } } + file_c1_connector_v2_annotation_grant_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrantImmutable); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -222,7 +298,7 @@ func file_c1_connector_v2_annotation_grant_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_c1_connector_v2_annotation_grant_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 3, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.validate.go index 05d3ff2..98bfe0e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.validate.go @@ -265,3 +265,134 @@ var _ interface { Cause() error ErrorName() string } = GrantExpandableValidationError{} + +// Validate checks the field values on GrantImmutable with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GrantImmutable) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrantImmutable with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GrantImmutableMultiError, +// or nil if none found. +func (m *GrantImmutable) ValidateAll() error { + return m.validate(true) +} + +func (m *GrantImmutable) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SourceId + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrantImmutableValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrantImmutableValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrantImmutableValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GrantImmutableMultiError(errors) + } + + return nil +} + +// GrantImmutableMultiError is an error wrapping multiple validation errors +// returned by GrantImmutable.ValidateAll() if the designated constraints +// aren't met. 
+type GrantImmutableMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrantImmutableMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrantImmutableMultiError) AllErrors() []error { return m } + +// GrantImmutableValidationError is the validation error returned by +// GrantImmutable.Validate if the designated constraints aren't met. +type GrantImmutableValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrantImmutableValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrantImmutableValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrantImmutableValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrantImmutableValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrantImmutableValidationError) ErrorName() string { return "GrantImmutableValidationError" } + +// Error satisfies the builtin error interface +func (e GrantImmutableValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrantImmutable.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrantImmutableValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrantImmutableValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.go index 8731ec7..0415504 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.go @@ -1063,6 +1063,7 @@ type Ticket struct { CreatedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` CompletedAt *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"` + RequestedFor *Resource `protobuf:"bytes,15,opt,name=requested_for,json=requestedFor,proto3" json:"requested_for,omitempty"` } func (x *Ticket) Reset() { @@ -1188,6 +1189,13 @@ func (x *Ticket) GetCompletedAt() *timestamppb.Timestamp { return nil } +func (x *Ticket) GetRequestedFor() *Resource { + if x != nil { + return x.RequestedFor + } + return nil +} + type TicketType struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1254,6 +1262,7 @@ type TicketRequest struct { Type *TicketType `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` Labels []string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty"` CustomFields map[string]*TicketCustomField `protobuf:"bytes,6,rep,name=custom_fields,json=customFields,proto3" json:"custom_fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + RequestedFor *Resource 
`protobuf:"bytes,7,opt,name=requested_for,json=requestedFor,proto3" json:"requested_for,omitempty"` } func (x *TicketRequest) Reset() { @@ -1330,6 +1339,13 @@ func (x *TicketRequest) GetCustomFields() map[string]*TicketCustomField { return nil } +func (x *TicketRequest) GetRequestedFor() *Resource { + if x != nil { + return x.RequestedFor + } + return nil +} + type TicketsServiceCreateTicketRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1753,7 +1769,7 @@ var file_c1_connector_v2_ticket_proto_rawDesc = []byte{ 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc9, 0x05, 0x0a, 0x06, 0x54, 0x69, 0x63, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x06, 0x0a, 0x06, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, @@ -1791,7 +1807,11 @@ var file_c1_connector_v2_ticket_proto_rawDesc = []byte{ 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, + 0x6d, 0x70, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, + 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x46, 0x6f, 0x72, 0x1a, 0x63, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, @@ -1802,7 +1822,7 @@ var file_c1_connector_v2_ticket_proto_rawDesc = []byte{ 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, - 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x90, 0x03, 0x0a, 0x0d, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, + 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xd0, 0x03, 0x0a, 0x0d, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, @@ -1821,7 +1841,11 @@ var file_c1_connector_v2_ticket_proto_rawDesc = []byte{ 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 
0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x1a, 0x63, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, + 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, + 0x46, 0x6f, 0x72, 0x1a, 0x63, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, @@ -1979,33 +2003,35 @@ var file_c1_connector_v2_ticket_proto_depIdxs = []int32{ 26, // 27: c1.connector.v2.Ticket.created_at:type_name -> google.protobuf.Timestamp 26, // 28: c1.connector.v2.Ticket.updated_at:type_name -> google.protobuf.Timestamp 26, // 29: c1.connector.v2.Ticket.completed_at:type_name -> google.protobuf.Timestamp - 11, // 30: c1.connector.v2.TicketRequest.status:type_name -> c1.connector.v2.TicketStatus - 17, // 31: c1.connector.v2.TicketRequest.type:type_name -> c1.connector.v2.TicketType - 25, // 32: c1.connector.v2.TicketRequest.custom_fields:type_name -> c1.connector.v2.TicketRequest.CustomFieldsEntry - 18, // 33: c1.connector.v2.TicketsServiceCreateTicketRequest.request:type_name -> c1.connector.v2.TicketRequest - 0, // 34: c1.connector.v2.TicketsServiceCreateTicketRequest.schema:type_name -> c1.connector.v2.TicketSchema - 27, // 35: c1.connector.v2.TicketsServiceCreateTicketRequest.annotations:type_name -> google.protobuf.Any - 16, // 36: c1.connector.v2.TicketsServiceCreateTicketResponse.ticket:type_name -> c1.connector.v2.Ticket - 27, // 37: c1.connector.v2.TicketsServiceCreateTicketResponse.annotations:type_name -> google.protobuf.Any - 27, // 38: c1.connector.v2.TicketsServiceGetTicketRequest.annotations:type_name -> google.protobuf.Any - 16, // 39: c1.connector.v2.TicketsServiceGetTicketResponse.ticket:type_name -> c1.connector.v2.Ticket - 27, // 40: c1.connector.v2.TicketsServiceGetTicketResponse.annotations:type_name -> google.protobuf.Any - 1, // 41: c1.connector.v2.TicketSchema.CustomFieldsEntry.value:type_name -> c1.connector.v2.TicketCustomField - 1, // 42: c1.connector.v2.Ticket.CustomFieldsEntry.value:type_name -> c1.connector.v2.TicketCustomField - 1, // 43: c1.connector.v2.TicketRequest.CustomFieldsEntry.value:type_name -> c1.connector.v2.TicketCustomField - 19, // 44: c1.connector.v2.TicketsService.CreateTicket:input_type -> c1.connector.v2.TicketsServiceCreateTicketRequest - 21, // 45: c1.connector.v2.TicketsService.GetTicket:input_type -> c1.connector.v2.TicketsServiceGetTicketRequest - 14, // 46: c1.connector.v2.TicketsService.ListTicketSchemas:input_type -> c1.connector.v2.TicketsServiceListTicketSchemasRequest - 12, // 47: c1.connector.v2.TicketsService.GetTicketSchema:input_type -> c1.connector.v2.TicketsServiceGetTicketSchemaRequest - 20, // 48: c1.connector.v2.TicketsService.CreateTicket:output_type -> c1.connector.v2.TicketsServiceCreateTicketResponse - 22, 
// 49: c1.connector.v2.TicketsService.GetTicket:output_type -> c1.connector.v2.TicketsServiceGetTicketResponse - 15, // 50: c1.connector.v2.TicketsService.ListTicketSchemas:output_type -> c1.connector.v2.TicketsServiceListTicketSchemasResponse - 13, // 51: c1.connector.v2.TicketsService.GetTicketSchema:output_type -> c1.connector.v2.TicketsServiceGetTicketSchemaResponse - 48, // [48:52] is the sub-list for method output_type - 44, // [44:48] is the sub-list for method input_type - 44, // [44:44] is the sub-list for extension type_name - 44, // [44:44] is the sub-list for extension extendee - 0, // [0:44] is the sub-list for field type_name + 28, // 30: c1.connector.v2.Ticket.requested_for:type_name -> c1.connector.v2.Resource + 11, // 31: c1.connector.v2.TicketRequest.status:type_name -> c1.connector.v2.TicketStatus + 17, // 32: c1.connector.v2.TicketRequest.type:type_name -> c1.connector.v2.TicketType + 25, // 33: c1.connector.v2.TicketRequest.custom_fields:type_name -> c1.connector.v2.TicketRequest.CustomFieldsEntry + 28, // 34: c1.connector.v2.TicketRequest.requested_for:type_name -> c1.connector.v2.Resource + 18, // 35: c1.connector.v2.TicketsServiceCreateTicketRequest.request:type_name -> c1.connector.v2.TicketRequest + 0, // 36: c1.connector.v2.TicketsServiceCreateTicketRequest.schema:type_name -> c1.connector.v2.TicketSchema + 27, // 37: c1.connector.v2.TicketsServiceCreateTicketRequest.annotations:type_name -> google.protobuf.Any + 16, // 38: c1.connector.v2.TicketsServiceCreateTicketResponse.ticket:type_name -> c1.connector.v2.Ticket + 27, // 39: c1.connector.v2.TicketsServiceCreateTicketResponse.annotations:type_name -> google.protobuf.Any + 27, // 40: c1.connector.v2.TicketsServiceGetTicketRequest.annotations:type_name -> google.protobuf.Any + 16, // 41: c1.connector.v2.TicketsServiceGetTicketResponse.ticket:type_name -> c1.connector.v2.Ticket + 27, // 42: c1.connector.v2.TicketsServiceGetTicketResponse.annotations:type_name -> google.protobuf.Any + 1, // 43: c1.connector.v2.TicketSchema.CustomFieldsEntry.value:type_name -> c1.connector.v2.TicketCustomField + 1, // 44: c1.connector.v2.Ticket.CustomFieldsEntry.value:type_name -> c1.connector.v2.TicketCustomField + 1, // 45: c1.connector.v2.TicketRequest.CustomFieldsEntry.value:type_name -> c1.connector.v2.TicketCustomField + 19, // 46: c1.connector.v2.TicketsService.CreateTicket:input_type -> c1.connector.v2.TicketsServiceCreateTicketRequest + 21, // 47: c1.connector.v2.TicketsService.GetTicket:input_type -> c1.connector.v2.TicketsServiceGetTicketRequest + 14, // 48: c1.connector.v2.TicketsService.ListTicketSchemas:input_type -> c1.connector.v2.TicketsServiceListTicketSchemasRequest + 12, // 49: c1.connector.v2.TicketsService.GetTicketSchema:input_type -> c1.connector.v2.TicketsServiceGetTicketSchemaRequest + 20, // 50: c1.connector.v2.TicketsService.CreateTicket:output_type -> c1.connector.v2.TicketsServiceCreateTicketResponse + 22, // 51: c1.connector.v2.TicketsService.GetTicket:output_type -> c1.connector.v2.TicketsServiceGetTicketResponse + 15, // 52: c1.connector.v2.TicketsService.ListTicketSchemas:output_type -> c1.connector.v2.TicketsServiceListTicketSchemasResponse + 13, // 53: c1.connector.v2.TicketsService.GetTicketSchema:output_type -> c1.connector.v2.TicketsServiceGetTicketSchemaResponse + 50, // [50:54] is the sub-list for method output_type + 46, // [46:50] is the sub-list for method input_type + 46, // [46:46] is the sub-list for extension type_name + 46, // [46:46] is the sub-list for extension extendee + 0, // 
[0:46] is the sub-list for field type_name } func init() { file_c1_connector_v2_ticket_proto_init() } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.validate.go index 3d9a46d..4aeef01 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.validate.go @@ -2853,6 +2853,35 @@ func (m *Ticket) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetRequestedFor()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TicketValidationError{ + field: "RequestedFor", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TicketValidationError{ + field: "RequestedFor", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestedFor()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TicketValidationError{ + field: "RequestedFor", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return TicketMultiError(errors) } @@ -3163,6 +3192,35 @@ func (m *TicketRequest) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetRequestedFor()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TicketRequestValidationError{ + field: "RequestedFor", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TicketRequestValidationError{ + field: "RequestedFor", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestedFor()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TicketRequestValidationError{ + field: "RequestedFor", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return TicketRequestMultiError(errors) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/annotations/annotations.go b/vendor/github.com/conductorone/baton-sdk/pkg/annotations/annotations.go index 84db79b..19eda94 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/annotations/annotations.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/annotations/annotations.go @@ -65,6 +65,10 @@ func (a *Annotations) Update(msg proto.Message) { *a = newAnnotations } +func (a *Annotations) Merge(newAnnotations ...*anypb.Any) { + *a = append(*a, newAnnotations...) +} + // Contains checks if the message is in the annotations slice. 
func (a *Annotations) Contains(msg proto.Message) bool { if msg == nil { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go deleted file mode 100644 index 2b1918d..0000000 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go +++ /dev/null @@ -1,495 +0,0 @@ -package cli - -import ( - "bufio" - "context" - "encoding/base64" - "fmt" - "os" - - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "github.com/spf13/cobra" - "go.uber.org/zap" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/conductorone/baton-sdk/internal/connector" - v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" - v1 "github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1" - "github.com/conductorone/baton-sdk/pkg/connectorrunner" - "github.com/conductorone/baton-sdk/pkg/logging" - "github.com/conductorone/baton-sdk/pkg/types" -) - -const ( - envPrefix = "baton" - defaultLogLevel = "info" - defaultLogFormat = logging.LogFormatJSON -) - -// NewCmd returns a new cobra command that will populate the provided config object, validate it, and run the provided run function. -func NewCmd[T any, PtrT *T]( - ctx context.Context, - name string, - cfg PtrT, - validateF func(ctx context.Context, cfg PtrT) error, - getConnector func(ctx context.Context, cfg PtrT) (types.ConnectorServer, error), - opts ...connectorrunner.Option, -) (*cobra.Command, error) { - err := setupService(name) - if err != nil { - return nil, err - } - - cmd := &cobra.Command{ - Use: name, - Short: name, - SilenceErrors: true, - SilenceUsage: true, - RunE: func(cmd *cobra.Command, args []string) error { - v, err := loadConfig(cmd, cfg) - if err != nil { - return err - } - - runCtx, err := initLogger( - ctx, - name, - logging.WithLogFormat(v.GetString("log-format")), - logging.WithLogLevel(v.GetString("log-level")), - ) - if err != nil { - return err - } - - err = validateF(ctx, cfg) - if err != nil { - return err - } - - l := ctxzap.Extract(runCtx) - - if isService() { - runCtx, err = runService(runCtx, name) - if err != nil { - l.Error("error running service", zap.Error(err)) - return err - } - } - - c, err := getConnector(runCtx, cfg) - if err != nil { - return err - } - - daemonMode := v.GetString("client-id") != "" || isService() - if daemonMode { - if v.GetString("client-id") == "" { - return fmt.Errorf("client-id is required in service mode") - } - if v.GetString("client-secret") == "" { - return fmt.Errorf("client-secret is required in service mode") - } - opts = append(opts, connectorrunner.WithClientCredentials(v.GetString("client-id"), v.GetString("client-secret"))) - } else { - switch { - case v.GetString("grant-entitlement") != "": - opts = append(opts, - connectorrunner.WithProvisioningEnabled(), - connectorrunner.WithOnDemandGrant( - v.GetString("file"), - v.GetString("grant-entitlement"), - v.GetString("grant-principal"), - v.GetString("grant-principal-type"), - )) - case v.GetString("revoke-grant") != "": - opts = append(opts, - connectorrunner.WithProvisioningEnabled(), - connectorrunner.WithOnDemandRevoke( - v.GetString("file"), - v.GetString("revoke-grant"), - )) - case v.GetBool("event-feed"): - opts = append(opts, connectorrunner.WithOnDemandEventStream()) - case v.GetString("create-account-login") != "": - opts = append(opts, - connectorrunner.WithProvisioningEnabled(), - connectorrunner.WithOnDemandCreateAccount( - 
v.GetString("file"), - v.GetString("create-account-login"), - v.GetString("create-account-email"), - )) - case v.GetString("delete-resource") != "": - opts = append(opts, - connectorrunner.WithProvisioningEnabled(), - connectorrunner.WithOnDemandDeleteResource( - v.GetString("file"), - v.GetString("delete-resource"), - v.GetString("delete-resource-type"), - )) - case v.GetString("rotate-credentials") != "": - opts = append(opts, - connectorrunner.WithProvisioningEnabled(), - connectorrunner.WithOnDemandRotateCredentials( - v.GetString("file"), - v.GetString("rotate-credentials"), - v.GetString("rotate-credentials-type"), - )) - case v.GetBool("create-ticket"): - opts = append(opts, - connectorrunner.WithTicketingEnabled(), - connectorrunner.WithCreateTicket(v.GetString("ticket-template-path"))) - case v.GetBool("list-ticket-schemas"): - opts = append(opts, - connectorrunner.WithTicketingEnabled(), - connectorrunner.WithListTicketSchemas()) - case v.GetBool("get-ticket"): - opts = append(opts, - connectorrunner.WithTicketingEnabled(), - connectorrunner.WithGetTicket(v.GetString("ticket-id"))) - default: - opts = append(opts, connectorrunner.WithOnDemandSync(v.GetString("file"))) - } - } - - if v.GetString("c1z-temp-dir") != "" { - c1zTmpDir := v.GetString("c1z-temp-dir") - if _, err := os.Stat(c1zTmpDir); os.IsNotExist(err) { - return fmt.Errorf("the specified c1z temp dir does not exist: %s", c1zTmpDir) - } - opts = append(opts, connectorrunner.WithTempDir(v.GetString("c1z-temp-dir"))) - } - - r, err := connectorrunner.NewConnectorRunner(runCtx, c, opts...) - if err != nil { - l.Error("error creating connector runner", zap.Error(err)) - return err - } - defer r.Close(runCtx) - - err = r.Run(runCtx) - if err != nil { - l.Error("error running connector", zap.Error(err)) - return err - } - - return nil - }, - } - - grpcServerCmd := &cobra.Command{ - Use: "_connector-service", - Short: "Start the connector service", - Hidden: true, - RunE: func(cmd *cobra.Command, args []string) error { - v, err := loadConfig(cmd, cfg) - if err != nil { - return err - } - - runCtx, err := initLogger( - ctx, - name, - logging.WithLogFormat(v.GetString("log-format")), - logging.WithLogLevel(v.GetString("log-level")), - ) - if err != nil { - return err - } - - err = validateF(runCtx, cfg) - if err != nil { - return err - } - - c, err := getConnector(runCtx, cfg) - if err != nil { - return err - } - - var copts []connector.Option - - if v.GetBool("provisioning") { - copts = append(copts, connector.WithProvisioningEnabled()) - } - - if v.GetBool("ticketing") { - copts = append(copts, connector.WithTicketingEnabled()) - } - - switch { - case v.GetString("grant-entitlement") != "": - copts = append(copts, connector.WithProvisioningEnabled()) - case v.GetString("revoke-grant") != "": - copts = append(copts, connector.WithProvisioningEnabled()) - case v.GetString("create-account-login") != "" || v.GetString("create-account-email") != "": - copts = append(copts, connector.WithProvisioningEnabled()) - case v.GetString("delete-resource") != "" || v.GetString("delete-resource-type") != "": - copts = append(copts, connector.WithProvisioningEnabled()) - case v.GetString("rotate-credentials") != "" || v.GetString("rotate-credentials-type") != "": - copts = append(copts, connector.WithProvisioningEnabled()) - case v.GetBool("create-ticket"): - copts = append(copts, connector.WithTicketingEnabled()) - case v.GetBool("list-ticket-schemas"): - copts = append(copts, connector.WithTicketingEnabled()) - case v.GetBool("get-ticket"): 
- copts = append(copts, connector.WithTicketingEnabled()) - } - - cw, err := connector.NewWrapper(runCtx, c, copts...) - if err != nil { - return err - } - - var cfgStr string - scn := bufio.NewScanner(os.Stdin) - for scn.Scan() { - cfgStr = scn.Text() - break - } - cfgBytes, err := base64.StdEncoding.DecodeString(cfgStr) - if err != nil { - return err - } - - go func() { - in := make([]byte, 1) - _, err := os.Stdin.Read(in) - if err != nil { - os.Exit(0) - } - }() - - if len(cfgBytes) == 0 { - return fmt.Errorf("unexpected empty input") - } - - serverCfg := &v1.ServerConfig{} - err = proto.Unmarshal(cfgBytes, serverCfg) - if err != nil { - return err - } - - err = serverCfg.ValidateAll() - if err != nil { - return err - } - - return cw.Run(runCtx, serverCfg) - }, - } - - capabilitiesCmd := &cobra.Command{ - Use: "capabilities", - Short: "Get connector capabilities", - RunE: func(cmd *cobra.Command, args []string) error { - v, err := loadConfig(cmd, cfg) - if err != nil { - return err - } - - runCtx, err := initLogger( - ctx, - name, - logging.WithLogFormat(v.GetString("log-format")), - logging.WithLogLevel(v.GetString("log-level")), - ) - if err != nil { - return err - } - - c, err := getConnector(runCtx, cfg) - if err != nil { - return err - } - - md, err := c.GetMetadata(runCtx, &v2.ConnectorServiceGetMetadataRequest{}) - if err != nil { - return err - } - - if md.Metadata.Capabilities == nil { - return fmt.Errorf("connector does not support capabilities") - } - - protoMarshaller := protojson.MarshalOptions{ - Multiline: true, - Indent: " ", - } - - a := &anypb.Any{} - err = anypb.MarshalFrom(a, md.Metadata.Capabilities, proto.MarshalOptions{Deterministic: true}) - if err != nil { - return err - } - - outBytes, err := protoMarshaller.Marshal(a) - if err != nil { - return err - } - - _, err = fmt.Fprint(os.Stdout, string(outBytes)) - if err != nil { - return err - } - - return nil - }, - } - - cmd.AddCommand(grpcServerCmd) - cmd.AddCommand(capabilitiesCmd) - - // Flags for file management - cmd.PersistentFlags().String("c1z-temp-dir", "", "The directory to store temporary files in. It "+ - "must exist, and write access is required. Defaults to the OS temporary directory. ($BATON_C1Z_TEMP_DIR)") - if err := cmd.PersistentFlags().MarkHidden("c1z-temp-dir"); err != nil { - return nil, err - } - - // Flags for logging configuration - cmd.PersistentFlags().String("log-level", defaultLogLevel, "The log level: debug, info, warn, error ($BATON_LOG_LEVEL)") - cmd.PersistentFlags().String("log-format", defaultLogFormat, "The output format for logs: json, console ($BATON_LOG_FORMAT)") - - // Flags for direct syncing and provisioning - cmd.PersistentFlags().StringP("file", "f", "sync.c1z", "The path to the c1z file to sync with ($BATON_FILE)") - - // TODO (ggreer): simplify command line flags. make one action and reuse entitlement, resource, etc. 
- // baton-connector --provision-action=grant --entitlement=entitlement_id --resource=resource_id --resource-type=resource_type - // baton-connector --provision-action=revoke --grant=grant_id - // baton-connector --provision-action=delete --resource-id=resource_id --resource-type=resource_type - // baton-connector --provision-action=create-account --login=login --email=email - // baton-connector --provision-action=rotate-credentials --resource-id=resource_id --resource-type=resource_type - - cmd.PersistentFlags().String("grant-entitlement", "", "The id of the entitlement to grant to the supplied principal ($BATON_GRANT_ENTITLEMENT)") - cmd.PersistentFlags().String("grant-principal", "", "The id of the resource to grant the entitlement to ($BATON_GRANT_PRINCIPAL)") - cmd.PersistentFlags().String("grant-principal-type", "", "The resource type of the principal to grant the entitlement to ($BATON_GRANT_PRINCIPAL_TYPE)") - cmd.PersistentFlags().String("revoke-grant", "", "The grant to revoke ($BATON_REVOKE_GRANT)") - cmd.PersistentFlags().Bool("event-feed", false, "Read feed events to stdout ($BATON_EVENT_FEED)") - cmd.MarkFlagsRequiredTogether("grant-entitlement", "grant-principal", "grant-principal-type") - - cmd.PersistentFlags().String("create-account-login", "", "The login of the account to create ($BATON_CREATE_ACCOUNT_LOGIN)") - cmd.PersistentFlags().String("create-account-email", "", "The email of the account to create ($BATON_CREATE_ACCOUNT_EMAIL)") - - cmd.PersistentFlags().String("delete-resource", "", "The id of the resource to delete ($BATON_DELETE_RESOURCE)") - cmd.PersistentFlags().String("delete-resource-type", "", "The type of the resource to delete ($BATON_DELETE_RESOURCE_TYPE)") - - cmd.PersistentFlags().String("rotate-credentials", "", "The id of the resource to rotate credentials on ($BATON_ROTATE_CREDENTIALS)") - cmd.PersistentFlags().String("rotate-credentials-type", "", "The type of the resource to rotate credentials on ($BATON_ROTATE_CREDENTIALS_TYPE)") - - cmd.PersistentFlags().Bool("ticketing", false, "This must be set to enable ticketing support ($BATON_TICKETING)") - cmd.PersistentFlags().Bool("create-ticket", false, "Create ticket ($BATON_CREATE_TICKET)") - cmd.PersistentFlags().String("ticket-template-path", "", "A JSON file describing the ticket to create ($BATON_TICKET_TEMPLATE_PATH)") - - cmd.PersistentFlags().Bool("list-ticket-schemas", false, "List ticket schemas ($BATON_LIST_SCHEMAS)") - - cmd.PersistentFlags().Bool("get-ticket", false, "Get ticket ($BATON_GET_TICKET)") - cmd.PersistentFlags().String("ticket-id", "", "The ID of the ticket to get ($BATON_TICKET_ID)") - - cmd.MarkFlagsMutuallyExclusive( - "grant-entitlement", - "revoke-grant", - "create-account-login", - "delete-resource", - "rotate-credentials", - "event-feed", - "create-ticket", - "get-ticket", - "list-ticket-schemas", - ) - cmd.MarkFlagsMutuallyExclusive( - "grant-entitlement", - "revoke-grant", - "create-account-email", - "delete-resource-type", - "rotate-credentials-type", - "event-feed", - "create-ticket", - "get-ticket", - "list-ticket-schemas", - ) - err = cmd.PersistentFlags().MarkHidden("grant-entitlement") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("grant-principal") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("grant-principal-type") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("revoke-grant") - if err != nil { - return nil, err - } - err = 
cmd.PersistentFlags().MarkHidden("event-feed") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("create-ticket") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("ticket-template-path") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("list-ticket-schemas") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("get-ticket") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("ticket-id") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("create-account-login") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("create-account-email") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("delete-resource") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("delete-resource-type") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("rotate-credentials") - if err != nil { - return nil, err - } - err = cmd.PersistentFlags().MarkHidden("rotate-credentials-type") - if err != nil { - return nil, err - } - - // Flags for daemon mode - cmd.PersistentFlags().String("client-id", "", "The client ID used to authenticate with ConductorOne ($BATON_CLIENT_ID)") - cmd.PersistentFlags().String("client-secret", "", "The client secret used to authenticate with ConductorOne ($BATON_CLIENT_SECRET)") - cmd.PersistentFlags().BoolP("provisioning", "p", false, "This must be set in order for provisioning actions to be enabled. ($BATON_PROVISIONING)") - cmd.MarkFlagsRequiredTogether("client-id", "client-secret") - cmd.MarkFlagsMutuallyExclusive("file", "client-id") - cmd.MarkFlagsRequiredTogether("create-ticket", "ticket-template-path") - cmd.MarkFlagsRequiredTogether("get-ticket", "ticket-id") - // Add a hook for additional commands to be added to the root command. - // We use this for OS specific commands. - cmd.AddCommand(additionalCommands(name, cfg)...) 
- - err = configToCmdFlags(cmd, cfg) - if err != nil { - return nil, err - } - - return cmd, nil -} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go new file mode 100644 index 0000000..34edb0c --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go @@ -0,0 +1,323 @@ +package cli + +import ( + "bufio" + "context" + "encoding/base64" + "fmt" + "os" + + "github.com/conductorone/baton-sdk/internal/connector" + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + v1 "github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1" + "github.com/conductorone/baton-sdk/pkg/connectorrunner" + "github.com/conductorone/baton-sdk/pkg/field" + "github.com/conductorone/baton-sdk/pkg/logging" + "github.com/conductorone/baton-sdk/pkg/types" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "go.uber.org/zap" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +type GetConnectorFunc func(context.Context, *viper.Viper) (types.ConnectorServer, error) + +func MakeMainCommand( + ctx context.Context, + name string, + v *viper.Viper, + confschema field.Configuration, + getconnector GetConnectorFunc, + opts ...connectorrunner.Option, +) func(*cobra.Command, []string) error { + return func(*cobra.Command, []string) error { + // validate required fields and relationship constraints + if err := field.Validate(confschema, v); err != nil { + return err + } + + runCtx, err := initLogger( + ctx, + name, + logging.WithLogFormat(v.GetString("log-format")), + logging.WithLogLevel(v.GetString("log-level")), + ) + if err != nil { + return err + } + + l := ctxzap.Extract(runCtx) + + if isService() { + runCtx, err = runService(runCtx, name) + if err != nil { + l.Error("error running service", zap.Error(err)) + return err + } + } + + c, err := getconnector(runCtx, v) + if err != nil { + return err + } + + daemonMode := v.GetString("client-id") != "" || isService() + if daemonMode { + if v.GetString("client-id") == "" { + return fmt.Errorf("client-id is required in service mode") + } + if v.GetString("client-secret") == "" { + return fmt.Errorf("client-secret is required in service mode") + } + opts = append( + opts, + connectorrunner.WithClientCredentials( + v.GetString("client-id"), + v.GetString("client-secret"), + ), + ) + } else { + switch { + case v.GetString("grant-entitlement") != "": + opts = append(opts, + connectorrunner.WithProvisioningEnabled(), + connectorrunner.WithOnDemandGrant( + v.GetString("file"), + v.GetString("grant-entitlement"), + v.GetString("grant-principal"), + v.GetString("grant-principal-type"), + )) + case v.GetString("revoke-grant") != "": + opts = append(opts, + connectorrunner.WithProvisioningEnabled(), + connectorrunner.WithOnDemandRevoke( + v.GetString("file"), + v.GetString("revoke-grant"), + )) + case v.GetBool("event-feed"): + opts = append(opts, connectorrunner.WithOnDemandEventStream()) + case v.GetString("create-account-login") != "": + opts = append(opts, + connectorrunner.WithProvisioningEnabled(), + connectorrunner.WithOnDemandCreateAccount( + v.GetString("file"), + v.GetString("create-account-login"), + v.GetString("create-account-email"), + )) + case v.GetString("delete-resource") != "": + opts = append(opts, + connectorrunner.WithProvisioningEnabled(), + connectorrunner.WithOnDemandDeleteResource( + 
v.GetString("file"), + v.GetString("delete-resource"), + v.GetString("delete-resource-type"), + )) + case v.GetString("rotate-credentials") != "": + opts = append(opts, + connectorrunner.WithProvisioningEnabled(), + connectorrunner.WithOnDemandRotateCredentials( + v.GetString("file"), + v.GetString("rotate-credentials"), + v.GetString("rotate-credentials-type"), + )) + case v.GetBool("create-ticket"): + opts = append(opts, + connectorrunner.WithTicketingEnabled(), + connectorrunner.WithCreateTicket(v.GetString("ticket-template-path"))) + case v.GetBool("list-ticket-schemas"): + opts = append(opts, + connectorrunner.WithTicketingEnabled(), + connectorrunner.WithListTicketSchemas()) + case v.GetBool("get-ticket"): + opts = append(opts, + connectorrunner.WithTicketingEnabled(), + connectorrunner.WithGetTicket(v.GetString("ticket-id"))) + default: + opts = append(opts, connectorrunner.WithOnDemandSync(v.GetString("file"))) + } + } + + if v.GetString("c1z-temp-dir") != "" { + c1zTmpDir := v.GetString("c1z-temp-dir") + if _, err := os.Stat(c1zTmpDir); os.IsNotExist(err) { + return fmt.Errorf("the specified c1z temp dir does not exist: %s", c1zTmpDir) + } + opts = append(opts, connectorrunner.WithTempDir(v.GetString("c1z-temp-dir"))) + } + + r, err := connectorrunner.NewConnectorRunner(runCtx, c, opts...) + if err != nil { + l.Error("error creating connector runner", zap.Error(err)) + return err + } + defer r.Close(runCtx) + + err = r.Run(runCtx) + if err != nil { + l.Error("error running connector", zap.Error(err)) + return err + } + + return nil + } +} + +func MakeGRPCServerCommand( + ctx context.Context, + name string, + v *viper.Viper, + confschema field.Configuration, + getconnector GetConnectorFunc, +) func(*cobra.Command, []string) error { + return func(*cobra.Command, []string) error { + // validate required fields and relationship constraints + if err := field.Validate(confschema, v); err != nil { + return err + } + + runCtx, err := initLogger( + ctx, + name, + logging.WithLogFormat(v.GetString("log-format")), + logging.WithLogLevel(v.GetString("log-level")), + ) + if err != nil { + return err + } + + c, err := getconnector(runCtx, v) + if err != nil { + return err + } + + var copts []connector.Option + + if v.GetBool("provisioning") { + copts = append(copts, connector.WithProvisioningEnabled()) + } + + if v.GetBool("ticketing") { + copts = append(copts, connector.WithTicketingEnabled()) + } + + switch { + case v.GetString("grant-entitlement") != "": + copts = append(copts, connector.WithProvisioningEnabled()) + case v.GetString("revoke-grant") != "": + copts = append(copts, connector.WithProvisioningEnabled()) + case v.GetString("create-account-login") != "" || v.GetString("create-account-email") != "": + copts = append(copts, connector.WithProvisioningEnabled()) + case v.GetString("delete-resource") != "" || v.GetString("delete-resource-type") != "": + copts = append(copts, connector.WithProvisioningEnabled()) + case v.GetString("rotate-credentials") != "" || v.GetString("rotate-credentials-type") != "": + copts = append(copts, connector.WithProvisioningEnabled()) + case v.GetBool("create-ticket"): + copts = append(copts, connector.WithTicketingEnabled()) + case v.GetBool("list-ticket-schemas"): + copts = append(copts, connector.WithTicketingEnabled()) + case v.GetBool("get-ticket"): + copts = append(copts, connector.WithTicketingEnabled()) + } + + cw, err := connector.NewWrapper(runCtx, c, copts...) 
+ if err != nil { + return err + } + + var cfgStr string + scn := bufio.NewScanner(os.Stdin) + for scn.Scan() { + cfgStr = scn.Text() + break + } + cfgBytes, err := base64.StdEncoding.DecodeString(cfgStr) + if err != nil { + return err + } + + // NOTE (shackra): I don't understand this goroutine + go func() { + in := make([]byte, 1) + _, err := os.Stdin.Read(in) + if err != nil { + os.Exit(0) + } + }() + + if len(cfgBytes) == 0 { + return fmt.Errorf("unexpected empty input") + } + + serverCfg := &v1.ServerConfig{} + err = proto.Unmarshal(cfgBytes, serverCfg) + if err != nil { + return err + } + + err = serverCfg.ValidateAll() + if err != nil { + return err + } + + return cw.Run(runCtx, serverCfg) + } +} + +func MakeCapabilitiesCommand( + ctx context.Context, + name string, + v *viper.Viper, + getconnector GetConnectorFunc, +) func(*cobra.Command, []string) error { + return func(*cobra.Command, []string) error { + runCtx, err := initLogger( + ctx, + name, + logging.WithLogFormat(v.GetString("log-format")), + logging.WithLogLevel(v.GetString("log-level")), + ) + if err != nil { + return err + } + + c, err := getconnector(runCtx, v) + if err != nil { + return err + } + + md, err := c.GetMetadata(runCtx, &v2.ConnectorServiceGetMetadataRequest{}) + if err != nil { + return err + } + + if md.Metadata.Capabilities == nil { + return fmt.Errorf("connector does not support capabilities") + } + + protoMarshaller := protojson.MarshalOptions{ + Multiline: true, + Indent: " ", + } + + a := &anypb.Any{} + err = anypb.MarshalFrom(a, md.Metadata.Capabilities, proto.MarshalOptions{Deterministic: true}) + if err != nil { + return err + } + + outBytes, err := protoMarshaller.Marshal(a) + if err != nil { + return err + } + + _, err = fmt.Fprint(os.Stdout, string(outBytes)) + if err != nil { + return err + } + + return nil + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/config.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/config.go deleted file mode 100644 index a271a3e..0000000 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/config.go +++ /dev/null @@ -1,146 +0,0 @@ -package cli - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "strconv" - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -type BaseConfig struct { - LogLevel string `mapstructure:"log-level"` - LogFormat string `mapstructure:"log-format"` - C1zPath string `mapstructure:"file"` - ClientID string `mapstructure:"client-id"` - ClientSecret string `mapstructure:"client-secret"` - GrantEntitlementID string `mapstructure:"grant-entitlement"` - GrantPrincipalID string `mapstructure:"grant-principal"` - GrantPrincipalType string `mapstructure:"grant-principal-type"` - RevokeGrantID string `mapstructure:"revoke-grant"` - C1zTempDir string `mapstructure:"c1z-temp-dir"` -} - -func getConfigPath(customPath string) (string, string, error) { - if customPath != "" { - cfgDir, cfgFile := filepath.Split(filepath.Clean(customPath)) - if cfgDir == "" { - cfgDir = "." - } - - ext := filepath.Ext(cfgFile) - if ext == "" || (ext != ".yaml" && ext != ".yml") { - return "", "", errors.New("expected config file to have .yaml or .yml extension") - } - - return strings.TrimSuffix(cfgDir, string(filepath.Separator)), strings.TrimSuffix(cfgFile, ext), nil - } - - return ".", ".baton", nil -} - -// loadConfig sets viper up to parse the config into the provided configuration object. 
-func loadConfig[T any, PtrT *T](cmd *cobra.Command, cfg PtrT) (*viper.Viper, error) { - v := viper.New() - v.SetConfigType("yaml") - - cfgPath, cfgName, err := getConfigPath(os.Getenv("BATON_CONFIG_PATH")) - if err != nil { - return nil, err - } - - v.SetConfigName(cfgName) - v.AddConfigPath(cfgPath) - - if err := v.ReadInConfig(); err != nil { - if ok := !errors.Is(err, viper.ConfigFileNotFoundError{}); !ok { - return nil, err - } - } - - v.SetEnvPrefix(envPrefix) - v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) - v.AutomaticEnv() - if err := v.BindPFlags(cmd.PersistentFlags()); err != nil { - return nil, err - } - if err := v.BindPFlags(cmd.Flags()); err != nil { - return nil, err - } - - if err := v.Unmarshal(cfg); err != nil { - return nil, err - } - - return v, nil -} - -func configToCmdFlags[T any, PtrT *T](cmd *cobra.Command, cfg PtrT) error { - baseConfigFields := reflect.VisibleFields(reflect.TypeOf(BaseConfig{})) - baseConfigFieldsMap := make(map[string]bool) - for _, field := range baseConfigFields { - baseConfigFieldsMap[field.Name] = true - } - - fields := reflect.VisibleFields(reflect.TypeOf(*cfg)) - for _, field := range fields { - // ignore BaseConfig fields - if _, ok := baseConfigFieldsMap[field.Name]; ok { - continue - } - if field.Name == "BaseConfig" { - continue - } - - cfgField := field.Tag.Get("mapstructure") - if cfgField == "" { - return fmt.Errorf("mapstructure tag is required on config field %s", field.Name) - } - description := field.Tag.Get("description") - if description == "" { - // Skip fields without descriptions for backwards compatibility - continue - } - defaultValueStr := field.Tag.Get("defaultValue") - - envVarName := strings.ReplaceAll(strings.ToUpper(cfgField), "-", "_") - description = fmt.Sprintf("%s ($BATON_%s)", description, envVarName) - switch field.Type.Kind() { - case reflect.String: - cmd.PersistentFlags().String(cfgField, defaultValueStr, description) - case reflect.Bool: - defaultValue, err := strconv.ParseBool(defaultValueStr) - if defaultValueStr != "" && err != nil { - return fmt.Errorf("invalid default value for config field %s: %w", field.Name, err) - } - cmd.PersistentFlags().Bool(cfgField, defaultValue, description) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - defaultValue, err := strconv.ParseInt(defaultValueStr, 10, 64) - if defaultValueStr != "" && err != nil { - return fmt.Errorf("invalid default value for config field %s: %w", field.Name, err) - } - cmd.PersistentFlags().Int64(cfgField, defaultValue, description) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - defaultValue, err := strconv.ParseUint(defaultValueStr, 10, 64) - if defaultValueStr != "" && err != nil { - return fmt.Errorf("invalid default value for config field %s: %w", field.Name, err) - } - cmd.PersistentFlags().Uint64(cfgField, defaultValue, description) - case reflect.Float32, reflect.Float64: - defaultValue, err := strconv.ParseFloat(defaultValueStr, 64) - if defaultValueStr != "" && err != nil { - return fmt.Errorf("invalid default value for config field %s: %w", field.Name, err) - } - cmd.PersistentFlags().Float64(cfgField, defaultValue, description) - default: - return fmt.Errorf("unsupported type %s for config field %s", field.Type.Kind(), field.Name) - } - } - - return nil -} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/service_unix.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/service_unix.go index 19d9533..d9e8ad8 100644 --- 
a/vendor/github.com/conductorone/baton-sdk/pkg/cli/service_unix.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/service_unix.go @@ -5,6 +5,7 @@ package cli import ( "context" + "github.com/conductorone/baton-sdk/pkg/field" "github.com/conductorone/baton-sdk/pkg/logging" "github.com/spf13/cobra" ) @@ -13,18 +14,14 @@ func isService() bool { return false } -func setupService(name string) error { - return nil -} - -func additionalCommands[T any, PtrT *T](connectorName string, cfg PtrT) []*cobra.Command { - return nil -} - -func runService(ctx context.Context, name string) (context.Context, error) { +func runService(ctx context.Context, _ string) (context.Context, error) { return ctx, nil } -func initLogger(ctx context.Context, name string, opts ...logging.Option) (context.Context, error) { +func initLogger(ctx context.Context, _ string, opts ...logging.Option) (context.Context, error) { return logging.Init(ctx, opts...) } + +func AdditionalCommands(_ string, _ []field.SchemaField) []*cobra.Command { + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/service_windows.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/service_windows.go index ed2d4a9..5ba7498 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/service_windows.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/service_windows.go @@ -11,9 +11,9 @@ import ( "path/filepath" "reflect" "strconv" - "strings" "time" + "github.com/conductorone/baton-sdk/pkg/field" "github.com/conductorone/baton-sdk/pkg/logging" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "github.com/spf13/cobra" @@ -272,48 +272,50 @@ func getWindowsService(ctx context.Context, name string) (*mgr.Service, func(), }, nil } -func interactiveSetup[T any, PtrT *T](ctx context.Context, outputFilePath string, cfg PtrT) error { +func interactiveSetup(ctx context.Context, outputFilePath string, fields []field.SchemaField) error { l := ctxzap.Extract(ctx) - var ret []reflect.StructField - fields := reflect.VisibleFields(reflect.TypeOf(*cfg)) - for _, field := range fields { - if _, ok := skipServiceSetupFields[field.Name]; !ok { - ret = append(ret, field) + config := make(map[string]interface{}) + for _, vfield := range fields { + if vfield.GetName() == "" { + return fmt.Errorf("field has no name") } - } - config := make(map[string]interface{}) - for _, field := range ret { - var input string - cfgField := field.Tag.Get("mapstructure") - if cfgField == "" { - return fmt.Errorf("mapstructure tag is required on config field %s", field.Name) + // ignore any fields from the default set + if field.IsFieldAmongDefaultList(vfield) { + continue } - fmt.Printf("Enter %s: ", field.Name) + var input string + fmt.Printf("Enter %s: ", vfield.GetName()) scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { input = scanner.Text() break } - switch reflect.ValueOf(cfg).Elem().FieldByName(field.Name).Type() { - case stringReflectType: - config[cfgField] = input - - case boolReflectType: + switch vfield.GetType() { + case reflect.Bool: b, err := strconv.ParseBool(input) if err != nil { return err } - config[cfgField] = b + config[vfield.GetName()] = b - case stringSliceReflectType: - config[cfgField] = strings.Split(input, ",") + case reflect.String: + config[vfield.GetName()] = input + case reflect.Int: + i, err := strconv.Atoi(input) + if err != nil { + return err + } + + config[vfield.GetName()] = i + + // TODO (shackra): add support for []string in SDK default: - l.Error("Unsupported type for interactive 
config.", zap.String("type", field.Type.String())) + l.Error("Unsupported type for interactive config.", zap.String("type", vfield.GetType().String())) return errors.New("unsupported type for interactive config") } } @@ -349,52 +351,40 @@ func interactiveSetup[T any, PtrT *T](ctx context.Context, outputFilePath string return nil } -func installCmd[T any, PtrT *T](name string, cfg PtrT) *cobra.Command { +func installCmd(name string, fields []field.SchemaField) *cobra.Command { cmd := &cobra.Command{ Use: "setup", Short: fmt.Sprintf("Setup and configure the %s service", name), RunE: func(cmd *cobra.Command, args []string) error { - ctx, err := initLogger( - context.Background(), - name, - logging.WithLogFormat(logging.LogFormatConsole), - logging.WithLogLevel("info"), - ) - + ctx, err := initLogger(context.Background(), name, logging.WithLogFormat(logging.LogFormatConsole), logging.WithLogLevel("info")) l := ctxzap.Extract(ctx) - svcMgr, err := mgr.Connect() if err != nil { l.Error("Failed to connect to service manager.", zap.Error(err)) return err } defer svcMgr.Disconnect() - s, err := svcMgr.OpenService(name) if err == nil { s.Close() return fmt.Errorf("%s is already installed as a service. Please run '%s remove' to remove it first.", name, os.Args[0]) } - - err = interactiveSetup(ctx, filepath.Join(getConfigDir(name), defaultConfigFile), cfg) + err = interactiveSetup(ctx, filepath.Join(getConfigDir(name), defaultConfigFile), fields) if err != nil { l.Error("Failed to setup service.", zap.Error(err)) return err } - exePath, err := getExePath() if err != nil { l.Error("Failed to get executable path.", zap.Error(err)) return err } - s, err = svcMgr.CreateService(name, exePath, mgr.Config{DisplayName: name}) if err != nil { l.Error("Failed to create service.", zap.Error(err), zap.String("service_name", name)) return err } defer s.Close() - err = eventlog.InstallAsEventCreate(name, eventlog.Error|eventlog.Warning|eventlog.Info) if err != nil { l.Error("Failed to install event log source.", zap.Error(err)) @@ -405,7 +395,6 @@ func installCmd[T any, PtrT *T](name string, cfg PtrT) *cobra.Command { } return err } - l.Info("Successfully installed service.", zap.String("service_name", name)) return nil }, @@ -521,12 +510,12 @@ func runService(ctx context.Context, name string) (context.Context, error) { return ctx, nil } -func additionalCommands[T any, PtrT *T](connectorName string, cfg PtrT) []*cobra.Command { +func AdditionalCommands(connectorName string, fields []field.SchemaField) []*cobra.Command { return []*cobra.Command{ startCmd(connectorName), stopCmd(connectorName), statusCmd(connectorName), - installCmd(connectorName, cfg), + installCmd(connectorName, fields), uninstallCmd(connectorName), } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go b/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go new file mode 100644 index 0000000..8b61b2c --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go @@ -0,0 +1,228 @@ +package config + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + + "github.com/conductorone/baton-sdk/pkg/cli" + "github.com/conductorone/baton-sdk/pkg/connectorrunner" + "github.com/conductorone/baton-sdk/pkg/field" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/spf13/viper" +) + +func DefineConfiguration( + ctx context.Context, + connectorName string, + connector cli.GetConnectorFunc, + schema field.Configuration, + options ...connectorrunner.Option, +) 
(*viper.Viper, *cobra.Command, error) { + v := viper.New() + v.SetConfigType("yaml") + + path, name, err := cleanOrGetConfigPath(os.Getenv("BATON_CONFIG_PATH")) + if err != nil { + return nil, nil, err + } + + v.SetConfigName(name) + v.AddConfigPath(path) + if err := v.ReadInConfig(); err != nil { + if errors.Is(err, viper.ConfigFileNotFoundError{}) { + return nil, nil, err + } + } + v.SetEnvPrefix("baton") + v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + v.AutomaticEnv() + + // add default fields and constrains + schema.Fields = field.EnsureDefaultFieldsExists(schema.Fields) + schema.Constraints = field.EnsureDefaultRelationships(schema.Constraints) + + // setup CLI with cobra + mainCMD := &cobra.Command{ + Use: connectorName, + Short: connectorName, + SilenceErrors: true, + SilenceUsage: true, + RunE: cli.MakeMainCommand(ctx, connectorName, v, schema, connector, options...), + } + + // add options to the main command + for _, field := range schema.Fields { + switch field.FieldType { + case reflect.Bool: + value, err := field.Bool() + if err != nil { + return nil, nil, fmt.Errorf( + "field %s, %s: %w", + field.FieldName, + field.FieldType, + err, + ) + } + mainCMD.PersistentFlags(). + BoolP(field.FieldName, field.CLIShortHand, value, field.GetDescription()) + case reflect.Int: + value, err := field.Int() + if err != nil { + return nil, nil, fmt.Errorf( + "field %s, %s: %w", + field.FieldName, + field.FieldType, + err, + ) + } + mainCMD.PersistentFlags(). + IntP(field.FieldName, field.CLIShortHand, value, field.GetDescription()) + case reflect.String: + value, err := field.String() + if err != nil { + return nil, nil, fmt.Errorf( + "field %s, %s: %w", + field.FieldName, + field.FieldType, + err, + ) + } + mainCMD.PersistentFlags(). + StringP(field.FieldName, field.CLIShortHand, value, field.GetDescription()) + case reflect.Slice: + value, err := field.StringArray() + if err != nil { + return nil, nil, fmt.Errorf( + "field %s, %s: %w", + field.FieldName, + field.FieldType, + err, + ) + } + mainCMD.PersistentFlags(). + StringArrayP(field.FieldName, field.CLIShortHand, value, field.GetDescription()) + default: + return nil, nil, fmt.Errorf( + "field %s, %s is not yet supported", + field.FieldName, + field.FieldType, + ) + } + + // mark hidden + if field.Hidden { + err := mainCMD.PersistentFlags().MarkHidden(field.FieldName) + if err != nil { + return nil, nil, fmt.Errorf( + "cannot hide field %s, %s: %w", + field.FieldName, + field.FieldType, + err, + ) + } + } + + // mark required + if field.Required { + if field.FieldType == reflect.Bool { + return nil, nil, fmt.Errorf("requiring %s of type %s does not make sense", field.FieldName, field.FieldType) + } + + err := mainCMD.MarkPersistentFlagRequired(field.FieldName) + if err != nil { + return nil, nil, fmt.Errorf( + "cannot require field %s, %s: %w", + field.FieldName, + field.FieldType, + err, + ) + } + } + } + + // apply constrains + for _, constrain := range schema.Constraints { + switch constrain.Kind { + case field.MutuallyExclusive: + mainCMD.MarkFlagsMutuallyExclusive(listFieldConstrainsAsStrings(constrain)...) + case field.RequiredTogether: + mainCMD.MarkFlagsRequiredTogether(listFieldConstrainsAsStrings(constrain)...) + case field.AtLeastOne: + mainCMD.MarkFlagsOneRequired(listFieldConstrainsAsStrings(constrain)...) 
+ case field.Dependents: + // do nothing + } + } + + if err := v.BindPFlags(mainCMD.PersistentFlags()); err != nil { + return nil, nil, err + } + if err := v.BindPFlags(mainCMD.Flags()); err != nil { + return nil, nil, err + } + + grpcServerCmd := &cobra.Command{ + Use: "_connector-service", + Short: "Start the connector service", + Hidden: true, + RunE: cli.MakeGRPCServerCommand(ctx, connectorName, v, schema, connector), + } + mainCMD.AddCommand(grpcServerCmd) + + capabilitiesCmd := &cobra.Command{ + Use: "capabilities", + Short: "Get connector capabilities", + RunE: cli.MakeCapabilitiesCommand(ctx, connectorName, v, connector), + } + mainCMD.AddCommand(capabilitiesCmd) + + mainCMD.AddCommand(cli.AdditionalCommands(name, schema.Fields)...) + + // NOTE (shackra): we don't check subcommands (i.e.: grpcServerCmd and capabilitiesCmd) + mainCMD.PersistentFlags().VisitAll(func(f *pflag.Flag) { + if v.IsSet(f.Name) { + _ = mainCMD.Flags().Set(f.Name, v.GetString(f.Name)) + } + }) + + return v, mainCMD, nil +} + +func listFieldConstrainsAsStrings(constrains field.SchemaFieldRelationship) []string { + var fields []string + for _, v := range constrains.Fields { + fields = append(fields, v.FieldName) + } + + return fields +} + +func cleanOrGetConfigPath(customPath string) (string, string, error) { + if customPath != "" { + cfgDir, cfgFile := filepath.Split(filepath.Clean(customPath)) + if cfgDir == "" { + cfgDir = "." + } + + ext := filepath.Ext(cfgFile) + if ext == "" || (ext != ".yaml" && ext != ".yml") { + return "", "", errors.New("expected config file to have .yaml or .yml extension") + } + + return strings.TrimSuffix( + cfgDir, + string(filepath.Separator), + ), strings.TrimSuffix( + cfgFile, + ext, + ), nil + } + + return ".", ".baton", nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go index 4f8531d..48616d4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go @@ -2,8 +2,10 @@ package connectorbuilder import ( "context" + "errors" "fmt" "sort" + "time" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "go.uber.org/zap" @@ -15,8 +17,10 @@ import ( v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" "github.com/conductorone/baton-sdk/pkg/annotations" "github.com/conductorone/baton-sdk/pkg/crypto" + "github.com/conductorone/baton-sdk/pkg/metrics" "github.com/conductorone/baton-sdk/pkg/pagination" "github.com/conductorone/baton-sdk/pkg/types" + "github.com/conductorone/baton-sdk/pkg/types/tasks" ) type ResourceSyncer interface { @@ -83,10 +87,16 @@ type builderImpl struct { eventFeed EventProvider cb ConnectorBuilder ticketManager TicketManager + ticketingEnabled bool + m *metrics.M + nowFunc func() time.Time } func (b *builderImpl) ListTicketSchemas(ctx context.Context, request *v2.TicketsServiceListTicketSchemasRequest) (*v2.TicketsServiceListTicketSchemasResponse, error) { + start := b.nowFunc() + tt := tasks.ListTicketSchemasType if b.ticketManager == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: ticket manager not implemented") } @@ -95,12 +105,15 @@ func (b *builderImpl) ListTicketSchemas(ctx context.Context, request *v2.Tickets Token: request.PageToken, }) if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: 
listing ticket schemas failed: %w", err) } if request.PageToken != "" && request.PageToken == nextPageToken { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: listing ticket schemas failed: next page token is the same as the current page token. this is most likely a connector bug") } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.TicketsServiceListTicketSchemasResponse{ List: out, NextPageToken: nextPageToken, @@ -109,12 +122,16 @@ func (b *builderImpl) ListTicketSchemas(ctx context.Context, request *v2.Tickets } func (b *builderImpl) CreateTicket(ctx context.Context, request *v2.TicketsServiceCreateTicketRequest) (*v2.TicketsServiceCreateTicketResponse, error) { + start := b.nowFunc() + tt := tasks.CreateTicketType if b.ticketManager == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: ticket manager not implemented") } reqBody := request.GetRequest() if reqBody == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: request body is nil") } cTicket := &v2.Ticket{ @@ -124,13 +141,16 @@ func (b *builderImpl) CreateTicket(ctx context.Context, request *v2.TicketsServi Type: reqBody.GetType(), Labels: reqBody.GetLabels(), CustomFields: reqBody.GetCustomFields(), + RequestedFor: reqBody.GetRequestedFor(), } ticket, annos, err := b.ticketManager.CreateTicket(ctx, cTicket, request.GetSchema()) if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: creating ticket failed: %w", err) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.TicketsServiceCreateTicketResponse{ Ticket: ticket, Annotations: annos, @@ -138,15 +158,20 @@ func (b *builderImpl) CreateTicket(ctx context.Context, request *v2.TicketsServi } func (b *builderImpl) GetTicket(ctx context.Context, request *v2.TicketsServiceGetTicketRequest) (*v2.TicketsServiceGetTicketResponse, error) { + start := b.nowFunc() + tt := tasks.GetTicketType if b.ticketManager == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: ticket manager not implemented") } ticket, annos, err := b.ticketManager.GetTicket(ctx, request.GetId()) if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: getting ticket failed: %w", err) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.TicketsServiceGetTicketResponse{ Ticket: ticket, Annotations: annos, @@ -154,15 +179,20 @@ func (b *builderImpl) GetTicket(ctx context.Context, request *v2.TicketsServiceG } func (b *builderImpl) GetTicketSchema(ctx context.Context, request *v2.TicketsServiceGetTicketSchemaRequest) (*v2.TicketsServiceGetTicketSchemaResponse, error) { + start := b.nowFunc() + tt := tasks.GetTicketSchemaType if b.ticketManager == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: ticket manager not implemented") } ticketSchema, annos, err := b.ticketManager.GetTicketSchema(ctx, request.GetId()) if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: getting ticket metadata failed: %w", err) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.TicketsServiceGetTicketSchemaResponse{ Schema: ticketSchema, Annotations: annos, @@ -170,7 +200,7 @@ func (b *builderImpl) GetTicketSchema(ctx context.Context, request *v2.TicketsSe } // NewConnector creates a new ConnectorServer for a new resource. 
-func NewConnector(ctx context.Context, in interface{}) (types.ConnectorServer, error) { +func NewConnector(ctx context.Context, in interface{}, opts ...Opt) (types.ConnectorServer, error) { switch c := in.(type) { case ConnectorBuilder: ret := &builderImpl{ @@ -182,6 +212,16 @@ func NewConnector(ctx context.Context, in interface{}) (types.ConnectorServer, e credentialManagers: make(map[string]CredentialManager), cb: c, ticketManager: nil, + nowFunc: time.Now, + } + + err := ret.options(opts...) + if err != nil { + return nil, err + } + + if ret.m == nil { + ret.m = metrics.New(metrics.NewNoOpHandler(ctx)) } if b, ok := c.(EventProvider); ok { @@ -250,6 +290,35 @@ func NewConnector(ctx context.Context, in interface{}) (types.ConnectorServer, e } } +type Opt func(b *builderImpl) error + +func WithTicketingEnabled() Opt { + return func(b *builderImpl) error { + if _, ok := b.cb.(TicketManager); ok { + b.ticketingEnabled = true + return nil + } + return errors.New("external ticketing not supported") + } +} + +func WithMetricsHandler(h metrics.Handler) Opt { + return func(b *builderImpl) error { + b.m = metrics.New(h) + return nil + } +} + +func (b *builderImpl) options(opts ...Opt) error { + for _, opt := range opts { + if err := opt(b); err != nil { + return err + } + } + + return nil +} + func validateProvisionerVersion(ctx context.Context, p ResourceSyncer) error { _, ok := p.(ResourceProvisioner) _, okV2 := p.(ResourceProvisionerV2) @@ -265,19 +334,25 @@ func (b *builderImpl) ListResourceTypes( ctx context.Context, request *v2.ResourceTypesServiceListResourceTypesRequest, ) (*v2.ResourceTypesServiceListResourceTypesResponse, error) { + start := b.nowFunc() + tt := tasks.ListResourceTypesType var out []*v2.ResourceType for _, rb := range b.resourceBuilders { out = append(out, rb.ResourceType(ctx)) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.ResourceTypesServiceListResourceTypesResponse{List: out}, nil } // ListResources returns all available resources for a given resource type ID. func (b *builderImpl) ListResources(ctx context.Context, request *v2.ResourcesServiceListResourcesRequest) (*v2.ResourcesServiceListResourcesResponse, error) { + start := b.nowFunc() + tt := tasks.ListResourcesType rb, ok := b.resourceBuilders[request.ResourceTypeId] if !ok { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: list resources with unknown resource type %s", request.ResourceTypeId) } @@ -286,12 +361,15 @@ func (b *builderImpl) ListResources(ctx context.Context, request *v2.ResourcesSe Token: request.PageToken, }) if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: listing resources failed: %w", err) } if request.PageToken != "" && request.PageToken == nextPageToken { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: listing resources failed: next page token is the same as the current page token. this is most likely a connector bug") } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.ResourcesServiceListResourcesResponse{ List: out, NextPageToken: nextPageToken, @@ -301,8 +379,11 @@ func (b *builderImpl) ListResources(ctx context.Context, request *v2.ResourcesSe // ListEntitlements returns all the entitlements for a given resource. 
func (b *builderImpl) ListEntitlements(ctx context.Context, request *v2.EntitlementsServiceListEntitlementsRequest) (*v2.EntitlementsServiceListEntitlementsResponse, error) { + start := b.nowFunc() + tt := tasks.ListEntitlementsType rb, ok := b.resourceBuilders[request.Resource.Id.ResourceType] if !ok { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: list entitlements with unknown resource type %s", request.Resource.Id.ResourceType) } @@ -311,12 +392,15 @@ func (b *builderImpl) ListEntitlements(ctx context.Context, request *v2.Entitlem Token: request.PageToken, }) if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: listing entitlements failed: %w", err) } if request.PageToken != "" && request.PageToken == nextPageToken { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: listing entitlements failed: next page token is the same as the current page token. this is most likely a connector bug") } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.EntitlementsServiceListEntitlementsResponse{ List: out, NextPageToken: nextPageToken, @@ -326,9 +410,13 @@ func (b *builderImpl) ListEntitlements(ctx context.Context, request *v2.Entitlem // ListGrants lists all the grants for a given resource. func (b *builderImpl) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest) (*v2.GrantsServiceListGrantsResponse, error) { - rb, ok := b.resourceBuilders[request.Resource.Id.ResourceType] + start := b.nowFunc() + tt := tasks.ListGrantsType + rid := request.Resource.Id + rb, ok := b.resourceBuilders[rid.ResourceType] if !ok { - return nil, fmt.Errorf("error: list entitlements with unknown resource type %s", request.Resource.Id.ResourceType) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: list entitlements with unknown resource type %s", rid.ResourceType) } out, nextPageToken, annos, err := rb.Grants(ctx, request.Resource, &pagination.Token{ @@ -336,12 +424,17 @@ func (b *builderImpl) ListGrants(ctx context.Context, request *v2.GrantsServiceL Token: request.PageToken, }) if err != nil { - return nil, fmt.Errorf("error: listing grants failed: %w", err) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: listing grants for resource %s/%s failed: %w", rid.ResourceType, rid.Resource, err) } if request.PageToken != "" && request.PageToken == nextPageToken { - return nil, fmt.Errorf("error: listing grants failed: next page token is the same as the current page token. this is most likely a connector bug") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: listing grants for resource %s/%s failed: next page token is the same as the current page token. this is most likely a connector bug", + rid.ResourceType, + rid.Resource) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.GrantsServiceListGrantsResponse{ List: out, NextPageToken: nextPageToken, @@ -351,13 +444,23 @@ func (b *builderImpl) ListGrants(ctx context.Context, request *v2.GrantsServiceL // GetMetadata gets all metadata for a connector. 
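The list handlers keep their existing guard against a next page token that matches the request token, which would otherwise loop on the same page forever; the change above additionally records a task failure when the guard trips. The check itself as a standalone sketch (the helper name samePageToken is hypothetical):

package sketch

import "fmt"

// samePageToken reports the condition the list handlers treat as a connector
// bug: a non-empty request token that is returned unchanged as the next page.
func samePageToken(requestToken, nextToken string) error {
	if requestToken != "" && requestToken == nextToken {
		return fmt.Errorf("next page token is the same as the current page token; this is most likely a connector bug")
	}
	return nil
}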
func (b *builderImpl) GetMetadata(ctx context.Context, request *v2.ConnectorServiceGetMetadataRequest) (*v2.ConnectorServiceGetMetadataResponse, error) { + start := b.nowFunc() + tt := tasks.GetMetadataType md, err := b.cb.Metadata(ctx) if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, err } md.Capabilities = getCapabilities(ctx, b) + annos := annotations.Annotations(md.Annotations) + if b.ticketManager != nil { + annos.Append(&v2.ExternalTicketSettings{Enabled: b.ticketingEnabled}) + } + md.Annotations = annos + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.ConnectorServiceGetMetadataResponse{Metadata: md}, nil } @@ -415,6 +518,8 @@ func (b *builderImpl) Validate(ctx context.Context, request *v2.ConnectorService } func (b *builderImpl) Grant(ctx context.Context, request *v2.GrantManagerServiceGrantRequest) (*v2.GrantManagerServiceGrantResponse, error) { + start := b.nowFunc() + tt := tasks.GrantType l := ctxzap.Extract(ctx) rt := request.Entitlement.Resource.Id.ResourceType @@ -423,9 +528,11 @@ func (b *builderImpl) Grant(ctx context.Context, request *v2.GrantManagerService annos, err := provisioner.Grant(ctx, request.Principal, request.Entitlement) if err != nil { l.Error("error: grant failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: grant failed: %w", err) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.GrantManagerServiceGrantResponse{Annotations: annos}, nil } @@ -434,17 +541,23 @@ func (b *builderImpl) Grant(ctx context.Context, request *v2.GrantManagerService grants, annos, err := provisionerV2.Grant(ctx, request.Principal, request.Entitlement) if err != nil { l.Error("error: grant failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: grant failed: %w", err) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.GrantManagerServiceGrantResponse{Annotations: annos, Grants: grants}, nil } l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: resource type does not have provisioner configured") } func (b *builderImpl) Revoke(ctx context.Context, request *v2.GrantManagerServiceRevokeRequest) (*v2.GrantManagerServiceRevokeResponse, error) { + start := b.nowFunc() + tt := tasks.RevokeType + l := ctxzap.Extract(ctx) rt := request.Grant.Entitlement.Resource.Id.ResourceType @@ -453,6 +566,7 @@ func (b *builderImpl) Revoke(ctx context.Context, request *v2.GrantManagerServic annos, err := provisioner.Revoke(ctx, request.Grant) if err != nil { l.Error("error: revoke failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: revoke failed: %w", err) } return &v2.GrantManagerServiceRevokeResponse{Annotations: annos}, nil @@ -463,12 +577,16 @@ func (b *builderImpl) Revoke(ctx context.Context, request *v2.GrantManagerServic annos, err := provisionerV2.Revoke(ctx, request.Grant) if err != nil { l.Error("error: revoke failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: revoke failed: %w", err) } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.GrantManagerServiceRevokeResponse{Annotations: annos}, nil } l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt)) + 
b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, status.Error(codes.Unimplemented, "resource type does not have provisioner configured") } @@ -479,7 +597,10 @@ func (b *builderImpl) GetAsset(request *v2.AssetServiceGetAssetRequest, server v } func (b *builderImpl) ListEvents(ctx context.Context, request *v2.ListEventsRequest) (*v2.ListEventsResponse, error) { + start := b.nowFunc() + tt := tasks.ListEventsType if b.eventFeed == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: event feed not implemented") } events, streamState, annotations, err := b.eventFeed.ListEvents(ctx, request.StartAt, &pagination.StreamToken{ @@ -487,8 +608,10 @@ func (b *builderImpl) ListEvents(ctx context.Context, request *v2.ListEventsRequ Cursor: request.Cursor, }) if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: listing events failed: %w", err) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.ListEventsResponse{ Events: events, Cursor: streamState.Cursor, @@ -498,6 +621,8 @@ func (b *builderImpl) ListEvents(ctx context.Context, request *v2.ListEventsRequ } func (b *builderImpl) CreateResource(ctx context.Context, request *v2.CreateResourceRequest) (*v2.CreateResourceResponse, error) { + start := b.nowFunc() + tt := tasks.CreateResourceType l := ctxzap.Extract(ctx) rt := request.GetResource().GetId().GetResourceType() manager, ok := b.resourceManagers[rt] @@ -505,15 +630,21 @@ func (b *builderImpl) CreateResource(ctx context.Context, request *v2.CreateReso resource, annos, err := manager.Create(ctx, request.Resource) if err != nil { l.Error("error: create resource failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: create resource failed: %w", err) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.CreateResourceResponse{Created: resource, Annotations: annos}, nil } l.Error("error: resource type does not have resource manager configured", zap.String("resource_type", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, status.Error(codes.Unimplemented, "resource type does not have resource manager configured") } func (b *builderImpl) DeleteResource(ctx context.Context, request *v2.DeleteResourceRequest) (*v2.DeleteResourceResponse, error) { + start := b.nowFunc() + tt := tasks.DeleteResourceType + l := ctxzap.Extract(ctx) rt := request.GetResourceId().GetResourceType() manager, ok := b.resourceManagers[rt] @@ -521,32 +652,40 @@ func (b *builderImpl) DeleteResource(ctx context.Context, request *v2.DeleteReso annos, err := manager.Delete(ctx, request.GetResourceId()) if err != nil { l.Error("error: delete resource failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: delete resource failed: %w", err) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.DeleteResourceResponse{Annotations: annos}, nil } l.Error("error: resource type does not have resource manager configured", zap.String("resource_type", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, status.Error(codes.Unimplemented, "resource type does not have resource manager configured") } func (b *builderImpl) RotateCredential(ctx context.Context, request *v2.RotateCredentialRequest) (*v2.RotateCredentialResponse, error) { + start := b.nowFunc() + tt := tasks.RotateCredentialsType l := ctxzap.Extract(ctx) rt := 
request.GetResourceId().GetResourceType() manager, ok := b.credentialManagers[rt] if !ok { l.Error("error: resource type does not have credential manager configured", zap.String("resource_type", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, status.Error(codes.Unimplemented, "resource type does not have credential manager configured") } plaintexts, annos, err := manager.Rotate(ctx, request.GetResourceId(), request.GetCredentialOptions()) if err != nil { l.Error("error: rotate credentials on resource failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: rotate credentials on resource failed: %w", err) } pkem, err := crypto.NewEncryptionManager(request.GetCredentialOptions(), request.GetEncryptionConfigs()) if err != nil { l.Error("error: creating encryption manager failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: creating encryption manager failed: %w", err) } @@ -554,11 +693,13 @@ func (b *builderImpl) RotateCredential(ctx context.Context, request *v2.RotateCr for _, plaintextCredential := range plaintexts { encryptedData, err := pkem.Encrypt(ctx, plaintextCredential) if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, err } encryptedDatas = append(encryptedDatas, encryptedData...) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return &v2.RotateCredentialResponse{ Annotations: annos, ResourceId: request.GetResourceId(), @@ -567,20 +708,25 @@ func (b *builderImpl) RotateCredential(ctx context.Context, request *v2.RotateCr } func (b *builderImpl) CreateAccount(ctx context.Context, request *v2.CreateAccountRequest) (*v2.CreateAccountResponse, error) { + start := b.nowFunc() + tt := tasks.CreateAccountType l := ctxzap.Extract(ctx) if b.accountManager == nil { l.Error("error: connector does not have account manager configured") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, status.Error(codes.Unimplemented, "connector does not have credential manager configured") } result, plaintexts, annos, err := b.accountManager.CreateAccount(ctx, request.GetAccountInfo(), request.GetCredentialOptions()) if err != nil { l.Error("error: create account failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: create account failed: %w", err) } pkem, err := crypto.NewEncryptionManager(request.GetCredentialOptions(), request.GetEncryptionConfigs()) if err != nil { l.Error("error: creating encryption manager failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, fmt.Errorf("error: creating encryption manager failed: %w", err) } @@ -588,6 +734,7 @@ func (b *builderImpl) CreateAccount(ctx context.Context, request *v2.CreateAccou for _, plaintextCredential := range plaintexts { encryptedData, err := pkem.Encrypt(ctx, plaintextCredential) if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, err } encryptedDatas = append(encryptedDatas, encryptedData...) 
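RotateCredential and CreateAccount share the tail end of their flow: the plaintext credentials returned by the connector are passed through an encryption manager built from the request's credential options and encryption configs, and any failure along the way is recorded as a failed task. A condensed sketch of that flow; the message types v2.CredentialOptions, v2.EncryptionConfig, v2.PlaintextData, and v2.EncryptedData are assumptions inferred from how the values are used above:

package sketch

import (
	"context"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"github.com/conductorone/baton-sdk/pkg/crypto"
)

// encryptAll mirrors the loop shared by RotateCredential and CreateAccount:
// one encryption manager per request, with every plaintext credential
// encrypted for each configured encryption target.
func encryptAll(ctx context.Context, opts *v2.CredentialOptions, configs []*v2.EncryptionConfig, plaintexts []*v2.PlaintextData) ([]*v2.EncryptedData, error) {
	pkem, err := crypto.NewEncryptionManager(opts, configs)
	if err != nil {
		return nil, err
	}

	var encrypted []*v2.EncryptedData
	for _, pt := range plaintexts {
		out, err := pkem.Encrypt(ctx, pt)
		if err != nil {
			return nil, err
		}
		encrypted = append(encrypted, out...)
	}
	return encrypted, nil
}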
@@ -604,8 +751,10 @@ func (b *builderImpl) CreateAccount(ctx context.Context, request *v2.CreateAccou case *v2.CreateAccountResponse_ActionRequiredResult: rv.Result = &v2.CreateAccountResponse_ActionRequired{ActionRequired: r} default: + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, status.Error(codes.Unimplemented, fmt.Sprintf("unknown result type: %T", result)) } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) return rv, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go index 596278a..54d3f52 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go @@ -36,10 +36,11 @@ type Writer interface { CurrentSyncStep(ctx context.Context) (string, error) CheckpointSync(ctx context.Context, syncToken string) error EndSync(ctx context.Context) error - PutResourceType(ctx context.Context, resourceType *v2.ResourceType) error - PutResource(ctx context.Context, resource *v2.Resource) error - PutEntitlement(ctx context.Context, entitlement *v2.Entitlement) error - PutGrant(ctx context.Context, grant *v2.Grant) error PutAsset(ctx context.Context, assetRef *v2.AssetRef, contentType string, data []byte) error Cleanup(ctx context.Context) error + + PutGrants(ctx context.Context, grants ...*v2.Grant) error + PutResourceTypes(ctx context.Context, resourceTypes ...*v2.ResourceType) error + PutResources(ctx context.Context, resources ...*v2.Resource) error + PutEntitlements(ctx context.Context, entitlements ...*v2.Entitlement) error } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go index d3f723f..2347a5b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go @@ -9,7 +9,6 @@ import ( "github.com/doug-martin/goqu/v9" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" ) @@ -64,8 +63,6 @@ func (c *C1File) PutAsset(ctx context.Context, assetRef *v2.AssetRef, contentTyp contentType = "unknown" } - l.Debug("syncing asset", zap.String("content_type", contentType), zap.Int("asset_size", len(data))) - err := c.validateSyncDb(ctx) if err != nil { return err @@ -101,8 +98,6 @@ func (c *C1File) PutAsset(ctx context.Context, assetRef *v2.AssetRef, contentTyp // GetAsset fetches the specified asset from the database, and returns the content type and an io.Reader for the caller to // read the asset from. func (c *C1File) GetAsset(ctx context.Context, request *v2.AssetServiceGetAssetRequest) (string, io.Reader, error) { - ctxzap.Extract(ctx).Debug("fetching asset", zap.String("id", request.Asset.Id)) - err := c.validateDb(ctx) if err != nil { return "", nil, err diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go index b33d1c3..04a3529 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go @@ -8,9 +8,17 @@ import ( "path/filepath" "github.com/doug-martin/goqu/v9" + // NOTE: required to register the dialect for goqu. 
+ // + // If you remove this import, goqu.Dialect("sqlite3") will + // return a copy of the default dialect, which is not what we want, + // and allocates a ton of memory. + _ "github.com/doug-martin/goqu/v9/dialect/sqlite3" + _ "github.com/glebarez/go-sqlite" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/connectorstore" ) type pragma struct { @@ -30,6 +38,8 @@ type C1File struct { pragmas []pragma } +var _ connectorstore.Writer = (*C1File)(nil) + type C1FOption func(*C1File) func WithC1FTmpDir(tempDir string) C1FOption { @@ -50,6 +60,7 @@ func NewC1File(ctx context.Context, dbFilePath string, opts ...C1FOption) (*C1Fi if err != nil { return nil, err } + db := goqu.New("sqlite3", rawDB) c1File := &C1File{ diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go index 69e4eb6..7191d21 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go @@ -5,8 +5,6 @@ import ( "fmt" "github.com/doug-martin/goqu/v9" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" "google.golang.org/protobuf/proto" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" @@ -51,10 +49,9 @@ func (r *entitlementsTable) Schema() (string, []interface{}) { } func (c *C1File) ListEntitlements(ctx context.Context, request *v2.EntitlementsServiceListEntitlementsRequest) (*v2.EntitlementsServiceListEntitlementsResponse, error) { - ctxzap.Extract(ctx).Debug("listing entitlements") objs, nextPageToken, err := c.listConnectorObjects(ctx, entitlements.Name(), request) if err != nil { - return nil, err + return nil, fmt.Errorf("error listing entitlements: %w", err) } ret := make([]*v2.Entitlement, 0, len(objs)) @@ -74,13 +71,11 @@ func (c *C1File) ListEntitlements(ctx context.Context, request *v2.EntitlementsS } func (c *C1File) GetEntitlement(ctx context.Context, request *reader_v2.EntitlementsReaderServiceGetEntitlementRequest) (*reader_v2.EntitlementsReaderServiceGetEntitlementResponse, error) { - ctxzap.Extract(ctx).Debug("fetching entitlement", zap.String("entitlement_id", request.EntitlementId)) - ret := &v2.Entitlement{} err := c.getConnectorObject(ctx, entitlements.Name(), request.EntitlementId, ret) if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching entitlement '%s': %w", request.EntitlementId, err) } return &reader_v2.EntitlementsReaderServiceGetEntitlementResponse{ @@ -88,27 +83,26 @@ func (c *C1File) GetEntitlement(ctx context.Context, request *reader_v2.Entitlem }, nil } -func (c *C1File) PutEntitlement(ctx context.Context, entitlement *v2.Entitlement) error { - ctxzap.Extract(ctx).Debug("syncing entitlement", zap.String("entitlement_id", entitlement.Id)) - - if entitlement.Resource == nil && entitlement.Resource.Id == nil { - return fmt.Errorf("entitlements must have a non-nil resource") - } +func (c *C1File) PutEntitlements(ctx context.Context, entitlementObjs ...*v2.Entitlement) error { + err := c.db.WithTx(func(tx *goqu.TxDatabase) error { + err := bulkPutConnectorObjectTx(ctx, c, tx, entitlements.Name(), + func(entitlement *v2.Entitlement) (goqu.Record, error) { + return goqu.Record{ + "resource_id": entitlement.Resource.Id.Resource, + "resource_type_id": entitlement.Resource.Id.ResourceType, + }, nil + }, + entitlementObjs..., + ) + if err != nil { + return err + } - query, args, err := 
c.putConnectorObjectQuery(ctx, entitlements.Name(), entitlement, goqu.Record{ - "resource_id": entitlement.Resource.Id.Resource, - "resource_type_id": entitlement.Resource.Id.ResourceType, + return nil }) if err != nil { return err } - - _, err = c.db.ExecContext(ctx, query, args...) - if err != nil { - return err - } - c.dbUpdated = true - return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go index 1f82d58..0a3cf5a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go @@ -5,8 +5,6 @@ import ( "fmt" "github.com/doug-martin/goqu/v9" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" "google.golang.org/protobuf/proto" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" @@ -60,11 +58,9 @@ func (r *grantsTable) Schema() (string, []interface{}) { } func (c *C1File) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest) (*v2.GrantsServiceListGrantsResponse, error) { - ctxzap.Extract(ctx).Debug("listing grants") - objs, nextPageToken, err := c.listConnectorObjects(ctx, grants.Name(), request) if err != nil { - return nil, err + return nil, fmt.Errorf("error listing grants: %w", err) } ret := make([]*v2.Grant, 0, len(objs)) @@ -84,13 +80,11 @@ func (c *C1File) ListGrants(ctx context.Context, request *v2.GrantsServiceListGr } func (c *C1File) GetGrant(ctx context.Context, request *reader_v2.GrantsReaderServiceGetGrantRequest) (*reader_v2.GrantsReaderServiceGetGrantResponse, error) { - ctxzap.Extract(ctx).Debug("fetching grant", zap.String("grant_id", request.GrantId)) - ret := &v2.Grant{} err := c.getConnectorObject(ctx, grants.Name(), request.GrantId, ret) if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching grant '%s': %w", request.GetGrantId(), err) } return &reader_v2.GrantsReaderServiceGetGrantResponse{ @@ -102,11 +96,9 @@ func (c *C1File) ListGrantsForEntitlement( ctx context.Context, request *reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest, ) (*reader_v2.GrantsReaderServiceListGrantsForEntitlementResponse, error) { - ctxzap.Extract(ctx).Debug("listing grants for entitlement") - objs, nextPageToken, err := c.listConnectorObjects(ctx, grants.Name(), request) if err != nil { - return nil, err + return nil, fmt.Errorf("error listing grants for entitlement '%s': %w", request.GetEntitlement().GetId(), err) } ret := make([]*v2.Grant, 0, len(objs)) @@ -129,11 +121,9 @@ func (c *C1File) ListGrantsForPrincipal( ctx context.Context, request *reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest, ) (*reader_v2.GrantsReaderServiceListGrantsForEntitlementResponse, error) { - ctxzap.Extract(ctx).Debug("listing grants for entitlement") - objs, nextPageToken, err := c.listConnectorObjects(ctx, grants.Name(), request) if err != nil { - return nil, err + return nil, fmt.Errorf("error listing grants for principal '%s': %w", request.GetPrincipalId(), err) } ret := make([]*v2.Grant, 0, len(objs)) @@ -156,11 +146,9 @@ func (c *C1File) ListGrantsForResourceType( ctx context.Context, request *reader_v2.GrantsReaderServiceListGrantsForResourceTypeRequest, ) (*reader_v2.GrantsReaderServiceListGrantsForResourceTypeResponse, error) { - ctxzap.Extract(ctx).Debug("listing grants for resource type") - objs, nextPageToken, err := c.listConnectorObjects(ctx, grants.Name(), request) if err != nil { - return nil, err + return nil, 
fmt.Errorf("error listing grants for resource type '%s': %w", request.GetResourceTypeId(), err) } ret := make([]*v2.Grant, 0, len(objs)) @@ -179,26 +167,28 @@ func (c *C1File) ListGrantsForResourceType( }, nil } -func (c *C1File) PutGrant(ctx context.Context, grant *v2.Grant) error { - ctxzap.Extract(ctx).Debug("syncing grant", zap.String("grant_id", grant.Id)) - - query, args, err := c.putConnectorObjectQuery(ctx, grants.Name(), grant, goqu.Record{ - "resource_type_id": grant.Entitlement.Resource.Id.ResourceType, - "resource_id": grant.Entitlement.Resource.Id.Resource, - "entitlement_id": grant.Entitlement.Id, - "principal_resource_type_id": grant.Principal.Id.ResourceType, - "principal_resource_id": grant.Principal.Id.Resource, +func (c *C1File) PutGrants(ctx context.Context, bulkGrants ...*v2.Grant) error { + err := c.db.WithTx(func(tx *goqu.TxDatabase) error { + err := bulkPutConnectorObjectTx(ctx, c, tx, grants.Name(), + func(grant *v2.Grant) (goqu.Record, error) { + return goqu.Record{ + "resource_type_id": grant.Entitlement.Resource.Id.ResourceType, + "resource_id": grant.Entitlement.Resource.Id.Resource, + "entitlement_id": grant.Entitlement.Id, + "principal_resource_type_id": grant.Principal.Id.ResourceType, + "principal_resource_id": grant.Principal.Id.Resource, + }, nil + }, + bulkGrants..., + ) + if err != nil { + return err + } + return nil }) if err != nil { return err } - - _, err = c.db.ExecContext(ctx, query, args...) - if err != nil { - return err - } - c.dbUpdated = true - return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go index 14048e3..a0bb4d7 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go @@ -4,12 +4,11 @@ import ( "context" "fmt" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" "google.golang.org/protobuf/proto" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" + "github.com/doug-martin/goqu/v9" ) const resourceTypesTableVersion = "1" @@ -45,11 +44,9 @@ func (r *resourceTypesTable) Schema() (string, []interface{}) { } func (c *C1File) ListResourceTypes(ctx context.Context, request *v2.ResourceTypesServiceListResourceTypesRequest) (*v2.ResourceTypesServiceListResourceTypesResponse, error) { - ctxzap.Extract(ctx).Debug("listing resource types") - objs, nextPageToken, err := c.listConnectorObjects(ctx, resourceTypes.Name(), request) if err != nil { - return nil, err + return nil, fmt.Errorf("error listing resource types: %w", err) } ret := make([]*v2.ResourceType, 0, len(objs)) @@ -69,13 +66,11 @@ func (c *C1File) ListResourceTypes(ctx context.Context, request *v2.ResourceType } func (c *C1File) GetResourceType(ctx context.Context, request *reader_v2.ResourceTypesReaderServiceGetResourceTypeRequest) (*reader_v2.ResourceTypesReaderServiceGetResourceTypeResponse, error) { - ctxzap.Extract(ctx).Debug("fetching resource type", zap.String("resource_type_id", request.ResourceTypeId)) - ret := &v2.ResourceType{} err := c.getConnectorObject(ctx, resourceTypes.Name(), request.ResourceTypeId, ret) if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching resource type '%s': %w", request.ResourceTypeId, err) } return &reader_v2.ResourceTypesReaderServiceGetResourceTypeResponse{ @@ -83,20 +78,22 @@ func (c *C1File) GetResourceType(ctx 
context.Context, request *reader_v2.Resourc }, nil } -func (c *C1File) PutResourceType(ctx context.Context, resourceType *v2.ResourceType) error { - ctxzap.Extract(ctx).Debug("syncing resource type", zap.String("resource_type_id", resourceType.Id)) - - query, args, err := c.putConnectorObjectQuery(ctx, resourceTypes.Name(), resourceType, nil) - if err != nil { - return err - } - - _, err = c.db.ExecContext(ctx, query, args...) +func (c *C1File) PutResourceTypes(ctx context.Context, resourceTypesObjs ...*v2.ResourceType) error { + err := c.db.WithTx(func(tx *goqu.TxDatabase) error { + err := bulkPutConnectorObjectTx(ctx, c, tx, resourceTypes.Name(), + func(resource *v2.ResourceType) (goqu.Record, error) { + return nil, nil + }, + resourceTypesObjs..., + ) + if err != nil { + return err + } + return nil + }) if err != nil { return err } - c.dbUpdated = true - return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go index a463316..dc23569 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go @@ -4,13 +4,12 @@ import ( "context" "fmt" - c1zpb "github.com/conductorone/baton-sdk/pb/c1/c1z/v1" - "github.com/conductorone/baton-sdk/pkg/annotations" "github.com/doug-martin/goqu/v9" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" "google.golang.org/protobuf/proto" + c1zpb "github.com/conductorone/baton-sdk/pb/c1/c1z/v1" + "github.com/conductorone/baton-sdk/pkg/annotations" + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" ) @@ -57,11 +56,9 @@ func (r *resourcesTable) Schema() (string, []interface{}) { } func (c *C1File) ListResources(ctx context.Context, request *v2.ResourcesServiceListResourcesRequest) (*v2.ResourcesServiceListResourcesResponse, error) { - ctxzap.Extract(ctx).Debug("listing resources") - objs, nextPageToken, err := c.listConnectorObjects(ctx, resources.Name(), request) if err != nil { - return nil, err + return nil, fmt.Errorf("error listing resources: %w", err) } ret := make([]*v2.Resource, 0, len(objs)) @@ -81,12 +78,6 @@ func (c *C1File) ListResources(ctx context.Context, request *v2.ResourcesService } func (c *C1File) GetResource(ctx context.Context, request *reader_v2.ResourcesReaderServiceGetResourceRequest) (*reader_v2.ResourcesReaderServiceGetResourceResponse, error) { - ctxzap.Extract(ctx).Debug( - "fetching resource", - zap.String("resource_id", request.ResourceId.Resource), - zap.String("resource_type_id", request.ResourceId.ResourceType), - ) - ret := &v2.Resource{} annos := annotations.Annotations(request.GetAnnotations()) syncDetails := &c1zpb.SyncDetails{} @@ -98,7 +89,7 @@ func (c *C1File) GetResource(ctx context.Context, request *reader_v2.ResourcesRe err := c.getResourceObject(ctx, request.ResourceId, ret, syncID) if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching resource '%s': %w", request.ResourceId, err) } return &reader_v2.ResourcesReaderServiceGetResourceResponse{ @@ -106,34 +97,31 @@ func (c *C1File) GetResource(ctx context.Context, request *reader_v2.ResourcesRe }, nil } -func (c *C1File) PutResource(ctx context.Context, resource *v2.Resource) error { - ctxzap.Extract(ctx).Debug( - "syncing resource", - zap.String("resource_id", resource.Id.Resource), - zap.String("resource_type_id", resource.Id.ResourceType), - ) - - updateRecord 
:= goqu.Record{ - "resource_type_id": resource.Id.ResourceType, - "external_id": fmt.Sprintf("%s:%s", resource.Id.ResourceType, resource.Id.Resource), - } - - if resource.ParentResourceId != nil { - updateRecord["parent_resource_type_id"] = resource.ParentResourceId.ResourceType - updateRecord["parent_resource_id"] = resource.ParentResourceId.Resource - } - - query, args, err := c.putConnectorObjectQuery(ctx, resources.Name(), resource, updateRecord) - if err != nil { - return err - } - - _, err = c.db.ExecContext(ctx, query, args...) +func (c *C1File) PutResources(ctx context.Context, resourceObjs ...*v2.Resource) error { + err := c.db.WithTx(func(tx *goqu.TxDatabase) error { + err := bulkPutConnectorObjectTx(ctx, c, tx, resources.Name(), + func(resource *v2.Resource) (goqu.Record, error) { + fields := goqu.Record{ + "resource_type_id": resource.Id.ResourceType, + "external_id": fmt.Sprintf("%s:%s", resource.Id.ResourceType, resource.Id.Resource), + } + + if resource.ParentResourceId != nil { + fields["parent_resource_type_id"] = resource.ParentResourceId.ResourceType + fields["parent_resource_id"] = resource.ParentResourceId.Resource + } + return fields, nil + }, + resourceObjs..., + ) + if err != nil { + return err + } + return nil + }) if err != nil { return err } - c.dbUpdated = true - return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go index 63f8356..3bd3fb9 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go @@ -70,8 +70,8 @@ type protoHasID interface { GetId() string } -// listConnectorObjects uses a connecter list request to fetch the corresponding data from the local db. -// It returns the raw bytes that need to be unmarshaled into the correct proto message. +// listConnectorObjects uses a connector list request to fetch the corresponding data from the local db. +// It returns the raw bytes that need to be unmarshalled into the correct proto message. 
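The Put* rewrites above all share one shape: connectorstore.Writer now exposes variadic bulk methods, and C1File implements each of them by opening a single transaction and delegating to a shared bulk-insert helper (introduced next in sql_helpers.go) with a closure that extracts the queryable columns from each message. From the caller's side a synced page is written in one call per object type; a sketch, assuming a sync is already in progress on the writer:

package sketch

import (
	"context"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"github.com/conductorone/baton-sdk/pkg/connectorstore"
)

// persistPage writes one page of synced objects with the new bulk methods.
// Each slice lands in a single transaction instead of one INSERT per object,
// as the old PutResource/PutGrant methods required.
func persistPage(ctx context.Context, w connectorstore.Writer, resources []*v2.Resource, grants []*v2.Grant) error {
	if err := w.PutResources(ctx, resources...); err != nil {
		return err
	}
	return w.PutGrants(ctx, grants...)
}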
func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req proto.Message) ([][]byte, string, error) { err := c.validateDb(ctx) if err != nil { @@ -231,37 +231,56 @@ func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req return ret, nextPageToken, nil } -func (c *C1File) putConnectorObjectQuery(ctx context.Context, tableName string, m proto.Message, fields goqu.Record) (string, []interface{}, error) { - err := c.validateSyncDb(ctx) - if err != nil { - return "", nil, err - } +var protoMarshaler = proto.MarshalOptions{Deterministic: true} - messageBlob, err := proto.MarshalOptions{Deterministic: true}.Marshal(m) +func bulkPutConnectorObjectTx[T proto.Message](ctx context.Context, c *C1File, + tx *goqu.TxDatabase, + tableName string, + extractFields func(m T) (goqu.Record, error), + msgs ...T) error { + err := c.validateSyncDb(ctx) if err != nil { - return "", nil, err + return err } - if fields == nil { - fields = goqu.Record{} - } + baseQ := tx.Insert(tableName).Prepared(true) + baseQ = baseQ.OnConflict(goqu.DoUpdate("external_id, sync_id", goqu.C("data").Set(goqu.I("EXCLUDED.data")))) - if _, idSet := fields["external_id"]; !idSet { - idGetter, ok := m.(protoHasID) - if !ok { - return "", nil, fmt.Errorf("unable to get ID for object") + for _, m := range msgs { + messageBlob, err := protoMarshaler.Marshal(m) + if err != nil { + return err } - fields["external_id"] = idGetter.GetId() - } - fields["data"] = messageBlob - fields["sync_id"] = c.currentSyncID - fields["discovered_at"] = time.Now().Format("2006-01-02 15:04:05.999999999") - q := c.db.Insert(tableName).Prepared(true) - q = q.Rows(fields) - q = q.OnConflict(goqu.DoUpdate("external_id, sync_id", goqu.C("data").Set(goqu.I("EXCLUDED.data")))) + fields, err := extractFields(m) + if err != nil { + return err + } + if fields == nil { + fields = goqu.Record{} + } - return q.ToSQL() + if _, idSet := fields["external_id"]; !idSet { + idGetter, ok := any(m).(protoHasID) + if !ok { + return fmt.Errorf("unable to get ID for object") + } + fields["external_id"] = idGetter.GetId() + } + fields["data"] = messageBlob + fields["sync_id"] = c.currentSyncID + fields["discovered_at"] = time.Now().Format("2006-01-02 15:04:05.999999999") + q := baseQ.Rows(fields) + query, args, err := q.ToSQL() + if err != nil { + return err + } + _, err = tx.Exec(query, args...) 
+ if err != nil { + return err + } + } + return nil } func (c *C1File) getResourceObject(ctx context.Context, resourceID *v2.ResourceId, m *v2.Resource, syncID string) error { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go new file mode 100644 index 0000000..f889a02 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go @@ -0,0 +1,32 @@ +package field + +var defaultRelationship = []SchemaFieldRelationship{ + FieldsRequiredTogether(grantEntitlementField, grantPrincipalField), + FieldsRequiredTogether(clientIDField, clientSecretField), + FieldsRequiredTogether(createTicketField, ticketTemplatePathField), + FieldsRequiredTogether(getTicketField, ticketIDField), + FieldsMutuallyExclusive( + grantEntitlementField, + revokeGrantField, + createAccountLoginField, + deleteResourceField, + rotateCredentialsField, + eventFeedField, + createTicketField, + getTicketField, + ListTicketSchemasField, + ), + FieldsMutuallyExclusive( + grantEntitlementField, + revokeGrantField, + createAccountEmailField, + deleteResourceTypeField, + rotateCredentialsTypeField, + eventFeedField, + ListTicketSchemasField, + ), +} + +func EnsureDefaultRelationships(original []SchemaFieldRelationship) []SchemaFieldRelationship { + return append(defaultRelationship, original...) +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go new file mode 100644 index 0000000..ced64e9 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go @@ -0,0 +1,92 @@ +package field + +import "github.com/conductorone/baton-sdk/pkg/logging" + +var ( + createTicketField = BoolField("create-ticket", WithHidden(true), WithDescription("Create ticket")) + getTicketField = BoolField("get-ticket", WithHidden(true), WithDescription("Get ticket")) + ListTicketSchemasField = BoolField("list-ticket-schemas", WithHidden(true), WithDescription("List ticket schemas")) + provisioningField = BoolField("provisioning", WithShortHand("p"), WithDescription("This must be set in order for provisioning actions to be enabled")) + TicketingField = BoolField("ticketing", WithDescription("This must be set to enable ticketing support")) + c1zTmpDirField = StringField("c1z-temp-dir", WithHidden(true), WithDescription("The directory to store temporary files in. It must exist, "+ + "and write access is required. 
Defaults to the OS temporary directory.")) + clientIDField = StringField("client-id", WithDescription("The client ID used to authenticate with ConductorOne")) + clientSecretField = StringField("client-secret", WithDescription("The client secret used to authenticate with ConductorOne")) + createAccountEmailField = StringField("create-account-email", WithHidden(true), WithDescription("The email of the account to create")) + createAccountLoginField = StringField("create-account-login", WithHidden(true), WithDescription("The login of the account to create")) + deleteResourceField = StringField("delete-resource", WithHidden(true), WithDescription("The id of the resource to delete")) + deleteResourceTypeField = StringField("delete-resource-type", WithHidden(true), WithDescription("The type of the resource to delete")) + eventFeedField = StringField("event-feed", WithHidden(true), WithDescription("Read feed events to stdout")) + fileField = StringField("file", WithShortHand("f"), WithDefaultValue("sync.c1z"), WithDescription("The path to the c1z file to sync with")) + grantEntitlementField = StringField("grant-entitlement", WithHidden(true), WithDescription("The id of the entitlement to grant to the supplied principal")) + grantPrincipalField = StringField("grant-principal", WithHidden(true), WithDescription("The id of the resource to grant the entitlement to")) + grantPrincipalTypeField = StringField("grant-principal-type", WithHidden(true), WithDescription("The resource type of the principal to grant the entitlement to")) + logFormatField = StringField("log-format", WithDefaultValue(logging.LogFormatJSON), WithDescription("The output format for logs: json, console")) + revokeGrantField = StringField("revoke-grant", WithHidden(true), WithDescription("The grant to revoke")) + rotateCredentialsField = StringField("rotate-credentials", WithHidden(true), WithDescription("The id of the resource to rotate credentials on")) + rotateCredentialsTypeField = StringField("rotate-credentials-type", WithHidden(true), WithDescription("The type of the resource to rotate credentials on")) + ticketIDField = StringField("ticket-id", WithHidden(true), WithDescription("The ID of the ticket to get")) + ticketTemplatePathField = StringField("ticket-template-path", WithHidden(true), WithDescription("A JSON file describing the ticket to create")) + logLevelField = StringField("log-level", WithDefaultValue("info"), WithDescription("The log level: debug, info, warn, error")) +) + +// DefaultFields list the default fields expected in every single connector. 
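The default field set comes paired with default relationships: client-id and client-secret must be supplied together, the one-shot action flags (grant, revoke, ticket operations, and so on) are mutually exclusive, and EnsureDefaultRelationships prepends those rules to whatever constraints a connector declares itself. A sketch of layering a connector-specific rule on top; apiTokenField and apiURLField are hypothetical fields:

package sketch

import "github.com/conductorone/baton-sdk/pkg/field"

// Hypothetical connector-specific fields; only their names matter here.
var (
	apiTokenField = field.StringField("api-token")
	apiURLField   = field.StringField("api-url")
)

// constraints returns the connector's own rules with the SDK defaults
// prepended by EnsureDefaultRelationships.
func constraints() []field.SchemaFieldRelationship {
	own := []field.SchemaFieldRelationship{
		field.FieldsRequiredTogether(apiTokenField, apiURLField),
	}
	return field.EnsureDefaultRelationships(own)
}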
+var DefaultFields = []SchemaField{ + createTicketField, + getTicketField, + ListTicketSchemasField, + provisioningField, + TicketingField, + c1zTmpDirField, + clientIDField, + clientSecretField, + createAccountEmailField, + createAccountLoginField, + deleteResourceField, + deleteResourceTypeField, + eventFeedField, + fileField, + grantEntitlementField, + grantPrincipalField, + grantPrincipalTypeField, + logFormatField, + revokeGrantField, + rotateCredentialsField, + rotateCredentialsTypeField, + ticketIDField, + ticketTemplatePathField, + logLevelField, +} + +func IsFieldAmongDefaultList(f SchemaField) bool { + for _, v := range DefaultFields { + if v.FieldName == f.FieldName { + return true + } + } + + return false +} + +func EnsureDefaultFieldsExists(originalFields []SchemaField) []SchemaField { + var notfound []SchemaField + + // compare the default list of fields + // with the incoming original list of fields + for _, d := range DefaultFields { + found := false + for _, o := range originalFields { + if d.FieldName == o.FieldName { + found = true + } + } + + if !found { + notfound = append(notfound, d) + } + } + + notfound = append(notfound, originalFields...) + + return notfound +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/field_options.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/field_options.go new file mode 100644 index 0000000..7177ee0 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/field_options.go @@ -0,0 +1,42 @@ +package field + +type fieldOption func(SchemaField) SchemaField + +func WithRequired(required bool) fieldOption { + return func(o SchemaField) SchemaField { + o.Required = required + return o + } +} + +func WithDescription(description string) fieldOption { + return func(o SchemaField) SchemaField { + o.Description = description + + return o + } +} + +func WithDefaultValue(value any) fieldOption { + return func(o SchemaField) SchemaField { + o.DefaultValue = value + + return o + } +} + +func WithHidden(hidden bool) fieldOption { + return func(o SchemaField) SchemaField { + o.Hidden = hidden + + return o + } +} + +func WithShortHand(sh string) fieldOption { + return func(o SchemaField) SchemaField { + o.CLIShortHand = sh + + return o + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go new file mode 100644 index 0000000..7b08deb --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go @@ -0,0 +1,149 @@ +package field + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +var ( + WrongValueTypeErr = errors.New("unable to cast any to concrete type") +) + +type SchemaField struct { + FieldName string + FieldType reflect.Kind + CLIShortHand string + Required bool + Hidden bool + Description string + DefaultValue any +} + +// Bool returns the default value as a boolean. +func (s SchemaField) Bool() (bool, error) { + value, ok := s.DefaultValue.(bool) + if !ok { + return false, WrongValueTypeErr + } + + return value, nil +} + +// Int returns the default value as a integer. +func (s SchemaField) Int() (int, error) { + value, ok := s.DefaultValue.(int) + if !ok { + return 0, WrongValueTypeErr + } + + return value, nil +} + +// String returns the default value as a string. +func (s SchemaField) String() (string, error) { + value, ok := s.DefaultValue.(string) + if !ok { + return "", WrongValueTypeErr + } + + return value, nil +} + +// StringArray retuns the default value as a string array. 
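Connector-specific options are declared as SchemaField values using the functional field options, and EnsureDefaultFieldsExists merges in any default fields the connector did not declare, so every binary still exposes the common flags. A small sketch; the region and debug-dump fields are illustrative only:

package sketch

import "github.com/conductorone/baton-sdk/pkg/field"

// Illustrative connector fields built with the functional field options.
var (
	regionField    = field.StringField("region", field.WithDefaultValue("us-east-1"), field.WithDescription("The API region to sync from"))
	debugDumpField = field.BoolField("debug-dump", field.WithHidden(true), field.WithDescription("Write raw API responses to disk"))
)

// allFields returns the connector's own fields plus any SDK defaults that
// were not already declared, via EnsureDefaultFieldsExists.
func allFields() []field.SchemaField {
	return field.EnsureDefaultFieldsExists([]field.SchemaField{regionField, debugDumpField})
}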
+func (s SchemaField) StringArray() ([]string, error) { + value, ok := s.DefaultValue.([]string) + if !ok { + return nil, WrongValueTypeErr + } + + return value, nil +} + +func (s SchemaField) GetDescription() string { + var line string + if s.Description == "" { + line = fmt.Sprintf("($BATON_%s)", toUpperCase(s.FieldName)) + } else { + line = fmt.Sprintf("%s ($BATON_%s)", s.Description, toUpperCase(s.FieldName)) + } + + if s.Required { + line = fmt.Sprintf("required: %s", line) + } + + return line +} + +func (s SchemaField) GetName() string { + return s.FieldName +} + +func (s SchemaField) GetType() reflect.Kind { + return s.FieldType +} + +func BoolField(name string, optional ...fieldOption) SchemaField { + field := SchemaField{ + FieldName: name, + FieldType: reflect.Bool, + DefaultValue: false, + } + + for _, o := range optional { + field = o(field) + } + + if field.Required { + panic(fmt.Sprintf("requiring %s of type %s does not make sense", field.FieldName, field.FieldType)) + } + + return field +} + +func StringField(name string, optional ...fieldOption) SchemaField { + field := SchemaField{ + FieldName: name, + FieldType: reflect.String, + DefaultValue: "", + } + + for _, o := range optional { + field = o(field) + } + + return field +} + +func IntField(name string, optional ...fieldOption) SchemaField { + field := SchemaField{ + FieldName: name, + FieldType: reflect.Int, + DefaultValue: 0, + } + + for _, o := range optional { + field = o(field) + } + + return field +} + +func StringArrayField(name string, optional ...fieldOption) SchemaField { + field := SchemaField{ + FieldName: name, + FieldType: reflect.Slice, + DefaultValue: []string{}, + } + + for _, o := range optional { + field = o(field) + } + + return field +} + +func toUpperCase(i string) string { + return strings.ReplaceAll(strings.ToUpper(i), "-", "_") +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/relationships.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/relationships.go new file mode 100644 index 0000000..769f822 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/relationships.go @@ -0,0 +1,45 @@ +package field + +type Relationship int + +const ( + RequiredTogether Relationship = iota + 1 + MutuallyExclusive + AtLeastOne + Dependents +) + +type SchemaFieldRelationship struct { + Kind Relationship + Fields []SchemaField + ExpectedFields []SchemaField +} + +func FieldsRequiredTogether(fields ...SchemaField) SchemaFieldRelationship { + return SchemaFieldRelationship{ + Kind: RequiredTogether, + Fields: fields, + } +} + +func FieldsMutuallyExclusive(fields ...SchemaField) SchemaFieldRelationship { + return SchemaFieldRelationship{ + Kind: MutuallyExclusive, + Fields: fields, + } +} + +func FieldsAtLeastOneUsed(fields ...SchemaField) SchemaFieldRelationship { + return SchemaFieldRelationship{ + Kind: AtLeastOne, + Fields: fields, + } +} + +func FieldsDependentOn(dependent []SchemaField, expected []SchemaField) SchemaFieldRelationship { + return SchemaFieldRelationship{ + Kind: Dependents, + Fields: dependent, + ExpectedFields: expected, + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/struct.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/struct.go new file mode 100644 index 0000000..12ecf64 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/struct.go @@ -0,0 +1,13 @@ +package field + +type Configuration struct { + Fields []SchemaField + Constraints []SchemaFieldRelationship +} + +func NewConfiguration(fields 
[]SchemaField, constraints ...SchemaFieldRelationship) Configuration { + return Configuration{ + Fields: fields, + Constraints: constraints, + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go new file mode 100644 index 0000000..6d39c76 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go @@ -0,0 +1,153 @@ +package field + +import ( + "fmt" + "reflect" + "strings" + + "github.com/spf13/viper" +) + +type ErrConfigurationMissingFields struct { + errors []error +} + +func (e *ErrConfigurationMissingFields) Error() string { + var messages []string + + for _, err := range e.errors { + messages = append(messages, err.Error()) + } + + return fmt.Sprintf("errors found:\n%s", strings.Join(messages, "\n")) +} + +func (e *ErrConfigurationMissingFields) Push(err error) { + e.errors = append(e.errors, err) +} + +// Validate perform validation of field requirement and constraints +// relationships after the configuration is read. +// We don't check the following: +// - if required fields are mutually exclusive +// - repeated fields (by name) are defined +// - if sets of fields are mutually exclusive and required +// together at the same time +// - if fields depedent on themselves +func Validate(c Configuration, v *viper.Viper) error { + present := make(map[string]int) + missingFieldsError := &ErrConfigurationMissingFields{} + + // check if required fields are present + for _, f := range c.Fields { + isNonZero := false + switch f.FieldType { + case reflect.Bool: + isNonZero = v.GetBool(f.FieldName) + case reflect.Int: + isNonZero = v.GetInt(f.FieldName) != 0 + case reflect.String: + isNonZero = v.GetString(f.FieldName) != "" + case reflect.Slice: + isNonZero = len(v.GetStringSlice(f.FieldName)) == 0 + default: + return fmt.Errorf("field %s has unsupported type %s", f.FieldName, f.FieldType) + } + + if isNonZero { + present[f.FieldName] = 1 + } + + if f.Required && !isNonZero { + missingFieldsError.Push(fmt.Errorf("field %s of type %s is marked as required but it has a zero-value", f.FieldName, f.FieldType)) + } + } + + if len(missingFieldsError.errors) > 0 { + return missingFieldsError + } + + // check constraints + return validateConstraints(present, c.Constraints) +} + +func validateConstraints(fieldsPresent map[string]int, relationships []SchemaFieldRelationship) error { + for _, relationship := range relationships { + var present int + for _, f := range relationship.Fields { + present += fieldsPresent[f.FieldName] + } + + var expected int + for _, e := range relationship.ExpectedFields { + expected += fieldsPresent[e.FieldName] + } + + if present > 1 && relationship.Kind == MutuallyExclusive { + return makeMutuallyExclusiveError(fieldsPresent, relationship) + } + if present > 0 && present < len(relationship.Fields) && relationship.Kind == RequiredTogether { + return makeNeededTogetherError(fieldsPresent, relationship) + } + if present == 0 && relationship.Kind == AtLeastOne { + return makeAtLeastOneError(fieldsPresent, relationship) + } + if present > 0 && expected != len(relationship.ExpectedFields) && relationship.Kind == Dependents { + return makeDependentFieldsError(fieldsPresent, relationship) + } + } + + return nil +} + +func makeMutuallyExclusiveError(fields map[string]int, relation SchemaFieldRelationship) error { + var found []string + for _, f := range relation.Fields { + if fields[f.FieldName] == 1 { + found = append(found, f.FieldName) + } + } + + return 
fmt.Errorf("fields marked as mutually exclusive were set: %s", strings.Join(found, ", ")) +} + +func makeNeededTogetherError(fields map[string]int, relation SchemaFieldRelationship) error { + var found []string + for _, f := range relation.Fields { + if fields[f.FieldName] == 0 { + found = append(found, f.FieldName) + } + } + + return fmt.Errorf("fields marked as needed together are missing: %s", strings.Join(found, ", ")) +} + +func makeAtLeastOneError(fields map[string]int, relation SchemaFieldRelationship) error { + var found []string + for _, f := range relation.Fields { + if fields[f.FieldName] == 0 { + found = append(found, f.FieldName) + } + } + + return fmt.Errorf("at least one field was expected, any of: %s", strings.Join(found, ", ")) +} + +func makeDependentFieldsError(fields map[string]int, relation SchemaFieldRelationship) error { + var notfoundExpected []string + for _, n := range relation.ExpectedFields { + if fields[n.FieldName] == 0 { + notfoundExpected = append(notfoundExpected, n.FieldName) + } + } + + var foundDependent []string + for _, f := range relation.Fields { + if fields[f.FieldName] == 1 { + foundDependent = append(foundDependent, f.FieldName) + } + } + + return fmt.Errorf("set fields %s are dependent on %s being set", + strings.Join(foundDependent, ", "), strings.Join(notfoundExpected, ", ")) +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/helpers/helpers.go b/vendor/github.com/conductorone/baton-sdk/pkg/helpers/helpers.go index b08f689..b4da5ed 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/helpers/helpers.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/helpers/helpers.go @@ -25,6 +25,57 @@ func SplitFullName(name string) (string, string) { return firstName, lastName } +var limitHeaders = []string{ + "X-Ratelimit-Limit", + "Ratelimit-Limit", + "X-RateLimit-Requests-Limit", // Linear uses a non-standard header +} + +var remainingHeaders = []string{ + "X-Ratelimit-Remaining", + "Ratelimit-Remaining", + "X-RateLimit-Requests-Remaining", // Linear uses a non-standard header +} + +var resetAtHeaders = []string{ + "X-Ratelimit-Reset", + "Ratelimit-Reset", + "X-RateLimit-Requests-Reset", // Linear uses a non-standard header + "Retry-After", // Often returned with 429 +} + +const thirtyYears = 60 * 60 * 24 * 365 * (2000 - 1970) + +// Many APIs don't follow standards and return incorrect datetimes. This function tries to handle those cases. +func parseTime(timeStr string) (time.Time, error) { + var t time.Time + res, err := strconv.ParseInt(timeStr, 10, 64) + if err != nil { + t, err = time.Parse(time.RFC850, timeStr) + if err != nil { + // Datetimes should be RFC850 but some APIs return RFC3339 + t, err = time.Parse(time.RFC3339, timeStr) + } + return t, err + } + + // Times are supposed to be in seconds, but some APIs return milliseconds + if res > thirtyYears*1000 { + res /= 1000 + } + + // Times are supposed to be offsets, but some return absolute seconds since 1970. 
+ if res > thirtyYears { + // If more than 30 years, it's probably an absolute timestamp + t = time.Unix(res, 0) + } else { + // Otherwise, it's a relative timestamp + t = time.Now().Add(time.Second * time.Duration(res)) + } + + return t, nil +} + func ExtractRateLimitData(statusCode int, header *http.Header) (*v2.RateLimitDescription, error) { if header == nil { return nil, nil @@ -34,43 +85,53 @@ func ExtractRateLimitData(statusCode int, header *http.Header) (*v2.RateLimitDes var limit int64 var err error - limitStr := header.Get("X-Ratelimit-Limit") - if limitStr != "" { - limit, err = strconv.ParseInt(limitStr, 10, 64) - if err != nil { - return nil, err + for _, limitHeader := range limitHeaders { + limitStr := header.Get(limitHeader) + if limitStr != "" { + limit, err = strconv.ParseInt(limitStr, 10, 64) + if err != nil { + return nil, err + } + break } } var remaining int64 - remainingStr := header.Get("X-Ratelimit-Remaining") - if remainingStr != "" { - remaining, err = strconv.ParseInt(remainingStr, 10, 64) - if err != nil { - return nil, err - } - if remaining > 0 { - rlstatus = v2.RateLimitDescription_STATUS_OK + for _, remainingHeader := range remainingHeaders { + remainingStr := header.Get(remainingHeader) + if remainingStr != "" { + remaining, err = strconv.ParseInt(remainingStr, 10, 64) + if err != nil { + return nil, err + } + break } } + if remaining > 0 { + rlstatus = v2.RateLimitDescription_STATUS_OK + } var resetAt time.Time - reset := header.Get("X-Ratelimit-Reset") - if reset != "" { - res, err := strconv.ParseInt(reset, 10, 64) - if err != nil { - return nil, err + for _, resetAtHeader := range resetAtHeaders { + resetAtStr := header.Get(resetAtHeader) + if resetAtStr != "" { + resetAt, err = parseTime(resetAtStr) + if err != nil { + return nil, err + } + break } + } - resetAt = time.Now().Add(time.Second * time.Duration(res)) + if statusCode == http.StatusTooManyRequests { + rlstatus = v2.RateLimitDescription_STATUS_OVERLIMIT + remaining = 0 } // If we didn't get any rate limit headers and status code is 429, return some sane defaults - if limit == 0 && remaining == 0 && resetAt.IsZero() && statusCode == http.StatusTooManyRequests { + if remaining == 0 && resetAt.IsZero() && rlstatus == v2.RateLimitDescription_STATUS_OVERLIMIT { limit = 1 - remaining = 0 resetAt = time.Now().Add(time.Second * 60) - rlstatus = v2.RateLimitDescription_STATUS_OVERLIMIT } return &v2.RateLimitDescription{ @@ -103,7 +164,7 @@ func IsXMLContentType(contentType string) bool { normalizedContentType := strings.TrimSpace(strings.ToLower(contentType)) for _, xmlContentType := range xmlContentTypes { - if normalizedContentType == xmlContentType { + if strings.HasPrefix(normalizedContentType, xmlContentType) { return true } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/logging/logging.go b/vendor/github.com/conductorone/baton-sdk/pkg/logging/logging.go index c31c6ed..50c6fe3 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/logging/logging.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/logging/logging.go @@ -30,6 +30,7 @@ func WithLogFormat(format string) Option { c.Encoding = LogFormatJSON case LogFormatConsole: c.Encoding = LogFormatConsole + c.EncoderConfig = zap.NewDevelopmentEncoderConfig() default: c.Encoding = LogFormatJSON } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/metrics/instrumentor.go b/vendor/github.com/conductorone/baton-sdk/pkg/metrics/instrumentor.go new file mode 100644 index 0000000..52541d4 --- /dev/null +++ 
b/vendor/github.com/conductorone/baton-sdk/pkg/metrics/instrumentor.go @@ -0,0 +1,39 @@ +package metrics + +import ( + "context" + "time" + + "github.com/conductorone/baton-sdk/pkg/types/tasks" +) + +const ( + taskSuccessCounterName = "baton_sdk.task_success" + taskFailureCounterName = "baton_sdk.task_failure" + taskDurationHistoName = "baton_sdk.task_latency" + taskSuccessCounterDesc = "number of successful tasks by task type" + taskFailureCounterDesc = "number of failed tasks by task type" + taskDurationHistoDesc = "duration of all tasks by task type and status" +) + +type M struct { + underlying Handler +} + +func (m *M) RecordTaskSuccess(ctx context.Context, task tasks.TaskType, dur time.Duration) { + c := m.underlying.Int64Counter(taskSuccessCounterName, taskSuccessCounterDesc, Dimensionless) + h := m.underlying.Int64Histogram(taskDurationHistoName, taskDurationHistoDesc, Milliseconds) + c.Add(ctx, 1, map[string]string{"task_type": task.String()}) + h.Record(ctx, dur.Milliseconds(), map[string]string{"task_type": task.String(), "task_status": "success"}) +} + +func (m *M) RecordTaskFailure(ctx context.Context, task tasks.TaskType, dur time.Duration) { + c := m.underlying.Int64Counter(taskFailureCounterName, taskFailureCounterDesc, Dimensionless) + h := m.underlying.Int64Histogram(taskDurationHistoName, taskDurationHistoDesc, Milliseconds) + c.Add(ctx, 1, map[string]string{"task_type": task.String()}) + h.Record(ctx, dur.Milliseconds(), map[string]string{"task_type": task.String(), "task_status": "failure"}) +} + +func New(handler Handler) *M { + return &M{underlying: handler} +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/metrics/metrics.go b/vendor/github.com/conductorone/baton-sdk/pkg/metrics/metrics.go new file mode 100644 index 0000000..7af8bcd --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/metrics/metrics.go @@ -0,0 +1,32 @@ +package metrics + +import ( + "context" +) + +type Handler interface { + Int64Counter(name string, description string, unit Unit) Int64Counter + Int64Gauge(name string, description string, unit Unit) Int64Gauge + Int64Histogram(name string, description string, unit Unit) Int64Histogram + WithTags(tags map[string]string) Handler +} + +type Int64Counter interface { + Add(ctx context.Context, value int64, tags map[string]string) +} + +type Int64Histogram interface { + Record(ctx context.Context, value int64, tags map[string]string) +} + +type Int64Gauge interface { + Observe(ctx context.Context, value int64, tags map[string]string) +} + +type Unit string + +const ( + Dimensionless Unit = "1" + Bytes Unit = "By" + Milliseconds Unit = "ms" +) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/metrics/noop.go b/vendor/github.com/conductorone/baton-sdk/pkg/metrics/noop.go new file mode 100644 index 0000000..eec13f8 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/metrics/noop.go @@ -0,0 +1,39 @@ +package metrics + +import "context" + +type noopRecorder struct{} + +func (*noopRecorder) Record(_ context.Context, _ int64, _ map[string]string) {} + +func (*noopRecorder) Add(_ context.Context, _ int64, _ map[string]string) {} + +func (*noopRecorder) Observe(_ context.Context, _ int64, _ map[string]string) {} + +var _ Int64Counter = (*noopRecorder)(nil) +var _ Int64Histogram = (*noopRecorder)(nil) +var _ Int64Gauge = (*noopRecorder)(nil) + +type noopHandler struct{} + +func (*noopHandler) Int64Counter(_ string, _ string, _ Unit) Int64Counter { + return &noopRecorder{} +} + +func (*noopHandler) Int64Gauge(_ string, _ 
string, _ Unit) Int64Gauge { + return &noopRecorder{} +} + +func (*noopHandler) Int64Histogram(_ string, _ string, _ Unit) Int64Histogram { + return &noopRecorder{} +} + +func (*noopHandler) WithTags(_ map[string]string) Handler { + return &noopHandler{} +} + +var _ Handler = (*noopHandler)(nil) + +func NewNoOpHandler(_ context.Context) Handler { + return &noopHandler{} +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/metrics/otel.go b/vendor/github.com/conductorone/baton-sdk/pkg/metrics/otel.go new file mode 100644 index 0000000..9a43d79 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/metrics/otel.go @@ -0,0 +1,183 @@ +package metrics + +import ( + "context" + "strings" + "sync" + + "go.opentelemetry.io/otel/attribute" + otelmetric "go.opentelemetry.io/otel/metric" +) + +var ( + _ Handler = (*otelHandler)(nil) + _ Int64Counter = (*otelInt64Counter)(nil) + _ Int64Gauge = (*otelInt64Gauge)(nil) + _ Int64Histogram = (*otelInt64Histogram)(nil) +) + +type otelHandler struct { + name string + meter otelmetric.Meter + provider otelmetric.MeterProvider + defaultAttrs *[]attribute.KeyValue + + int64CountersMtx sync.Mutex + int64Counters map[string]*otelInt64Counter + int64HistosMtx sync.Mutex + int64Histos map[string]*otelInt64Histogram + int64GaugesMtx sync.Mutex + int64Gauges map[string]*otelInt64Gauge +} + +type baseAttrs struct { + defaultAttrs *[]attribute.KeyValue +} + +func (a *baseAttrs) getAttributes(tags map[string]string) []attribute.KeyValue { + attrs := makeAttrs(tags) + if a.defaultAttrs != nil { + attrs = append(attrs, *a.defaultAttrs...) + } + + return attrs +} + +func (a *baseAttrs) setDefaultAttrs(attrs *[]attribute.KeyValue) { + a.defaultAttrs = attrs +} + +type otelInt64Counter struct { + *baseAttrs + counter otelmetric.Int64Counter +} + +func (c *otelInt64Counter) Add(ctx context.Context, value int64, tags map[string]string) { + attrs := c.getAttributes(tags) + + c.counter.Add(ctx, value, otelmetric.WithAttributes(attrs...)) +} + +type otelInt64Histogram struct { + *baseAttrs + histo otelmetric.Int64Histogram +} + +func (h *otelInt64Histogram) Record(ctx context.Context, value int64, tags map[string]string) { + attrs := h.getAttributes(tags) + + h.histo.Record(ctx, value, otelmetric.WithAttributes(attrs...)) +} + +type otelInt64Gauge struct { + *baseAttrs + value int64 + attrs []attribute.KeyValue + gauge otelmetric.Int64ObservableGauge +} + +func (g *otelInt64Gauge) Observe(_ context.Context, value int64, tags map[string]string) { + g.attrs = g.getAttributes(tags) + g.value = value +} + +func (h *otelHandler) Int64Histogram(name string, description string, unit Unit) Int64Histogram { + h.int64HistosMtx.Lock() + defer h.int64HistosMtx.Unlock() + + name = strings.ToLower(name) + + c, ok := h.int64Histos[name] + if !ok { + histo, err := h.meter.Int64Histogram(name, otelmetric.WithDescription(description), otelmetric.WithUnit(string(unit))) + if err != nil { + panic(err) + } + c = &otelInt64Histogram{histo: histo, baseAttrs: &baseAttrs{}} + h.int64Histos[name] = c + } + + c.setDefaultAttrs(h.defaultAttrs) + + return c +} + +func (h *otelHandler) Int64Counter(name string, description string, unit Unit) Int64Counter { + h.int64CountersMtx.Lock() + defer h.int64CountersMtx.Unlock() + + name = strings.ToLower(name) + + c, ok := h.int64Counters[name] + if !ok { + counter, err := h.meter.Int64Counter(name, otelmetric.WithDescription(description), otelmetric.WithUnit(string(unit))) + if err != nil { + panic(err) + } + c = &otelInt64Counter{counter: counter, 
baseAttrs: &baseAttrs{}} + h.int64Counters[name] = c + } + + c.setDefaultAttrs(h.defaultAttrs) + + return c +} + +func (h *otelHandler) Int64Gauge(name string, description string, unit Unit) Int64Gauge { + h.int64GaugesMtx.Lock() + defer h.int64GaugesMtx.Unlock() + + name = strings.ToLower(name) + + c, ok := h.int64Gauges[name] + if !ok { + gauge, err := h.meter.Int64ObservableGauge(name, otelmetric.WithDescription(description), otelmetric.WithUnit(string(unit))) + if err != nil { + panic(err) + } + + c = &otelInt64Gauge{gauge: gauge, baseAttrs: &baseAttrs{}} + + _, err = h.meter.RegisterCallback(func(ctx context.Context, observer otelmetric.Observer) error { + observer.ObserveInt64(c.gauge, c.value, otelmetric.WithAttributes(c.attrs...)) + return nil + }, c.gauge) + if err != nil { + panic(err) + } + + h.int64Gauges[name] = c + } + + c.setDefaultAttrs(h.defaultAttrs) + + return c +} + +func (h *otelHandler) WithTags(tags map[string]string) Handler { + attrs := makeAttrs(tags) + + h.defaultAttrs = &attrs + + return h +} + +func makeAttrs(tags map[string]string) []attribute.KeyValue { + attrs := make([]attribute.KeyValue, 0, len(tags)) + for k, v := range tags { + attrs = append(attrs, attribute.String(k, v)) + } + + return attrs +} + +func NewOtelHandler(_ context.Context, provider otelmetric.MeterProvider, name string) Handler { + return &otelHandler{ + name: name, + meter: provider.Meter(name), + provider: provider, + int64Counters: make(map[string]*otelInt64Counter), + int64Histos: make(map[string]*otelInt64Histogram), + int64Gauges: make(map[string]*otelInt64Gauge), + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/pagination/pagination.go b/vendor/github.com/conductorone/baton-sdk/pkg/pagination/pagination.go index 563ac26..74082b6 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/pagination/pagination.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/pagination/pagination.go @@ -21,9 +21,9 @@ type Token struct { } type PageState struct { - Token string `json:"token"` - ResourceTypeID string `json:"resource_type_id"` - ResourceID string `json:"resource_id"` + Token string `json:"token,omitempty"` + ResourceTypeID string `json:"type,omitempty"` + ResourceID string `json:"id,omitempty"` } // Bag holds pagination states that can be serialized for use as page tokens. It acts as a stack that you can push and pop diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go index f98920c..a4f5f6c 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go @@ -1,4 +1,3 @@ package sdk -// Version is the current version of the baton SDK. 
-const Version = "0.1.30" +const Version = "v0.2.10-15-gfd76fb6" diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand.go deleted file mode 100644 index 6d339c4..0000000 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand.go +++ /dev/null @@ -1,401 +0,0 @@ -package sync - -import ( - "context" - "errors" - "fmt" - "reflect" - - v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" -) - -var ( - ErrNoEntitlement = errors.New("no entitlement found") -) - -type EntitlementGraphAction struct { - SourceEntitlementID string `json:"source_entitlement_id"` - DescendantEntitlementID string `json:"descendant_entitlement_id"` - Shallow bool `json:"shallow"` - ResourceTypeIDs []string `json:"resource_types_ids"` - PageToken string `json:"page_token"` -} - -// Edges between entitlements are grants. -type grantInfo struct { - Expanded bool `json:"expanded"` - Shallow bool `json:"shallow"` - ResourceTypeIDs []string `json:"resource_type_ids"` -} - -type Node struct { - Id int `json:"id"` - EntitlementIDs []string `json:"entitlementIds"` // List of entitlements. -} - -type EntitlementGraph struct { - NodeCount int `json:"node_count"` - Nodes map[int]Node `json:"nodes"` - Edges map[int]map[int]*grantInfo `json:"edges"` // Adjacency list. Source node -> destination node - Loaded bool `json:"loaded"` - Depth int `json:"depth"` - Actions []EntitlementGraphAction `json:"actions"` -} - -func NewEntitlementGraph(ctx context.Context) *EntitlementGraph { - return &EntitlementGraph{ - NodeCount: 1, // Start at 1 in case we don't initialize something and try to get node 0 - Nodes: make(map[int]Node), - // TODO: probably want this for efficiency - // EntitlementsToNodes: make(map[string]int), - Edges: make(map[int]map[int]*grantInfo), - } -} - -func (g *EntitlementGraph) Str() string { - str := "\n" - for id, node := range g.Nodes { - str += fmt.Sprintf("node %v entitlement IDs: %v\n", id, node.EntitlementIDs) - } - str += "edges:\n" - for src, dsts := range g.Edges { - for dst, gi := range dsts { - str += fmt.Sprintf("%v -> %v grantInfo %v\n", src, dst, gi) - } - } - return str -} - -func (g *EntitlementGraph) Validate() error { - for srcNodeId, dstNodeIDs := range g.Edges { - node, ok := g.Nodes[srcNodeId] - if !ok { - return ErrNoEntitlement - } - if len(node.EntitlementIDs) == 0 { - return fmt.Errorf("empty node") - } - for dstNodeId, grantInfo := range dstNodeIDs { - node, ok := g.Nodes[dstNodeId] - if !ok { - return ErrNoEntitlement - } - if len(node.EntitlementIDs) == 0 { - return fmt.Errorf("empty node") - } - if grantInfo == nil { - return fmt.Errorf("nil edge info. dst node %v", dstNodeId) - } - } - } - // check for entitlement ids that are in multiple nodes - seenEntitlements := make(map[string]int) - for nodeID, node := range g.Nodes { - for _, entID := range node.EntitlementIDs { - if _, ok := seenEntitlements[entID]; ok { - return fmt.Errorf("entitlement %v is in multiple nodes: %v %v", entID, nodeID, seenEntitlements[entID]) - } - seenEntitlements[entID] = nodeID - } - } - return nil -} - -func (g *EntitlementGraph) isNodeExpanded(nodeID int) bool { - dstNodeIDs := g.Edges[nodeID] - for _, edgeInfo := range dstNodeIDs { - if !edgeInfo.Expanded { - return false - } - } - return true -} - -// IsExpanded returns true if all entitlements in the graph have been expanded. 
-func (g *EntitlementGraph) IsExpanded() bool { - for srcNodeID := range g.Edges { - if !g.isNodeExpanded(srcNodeID) { - return false - } - } - return true -} - -// IsEntitlementExpanded returns true if all the outgoing edges for the given entitlement have been expanded. -func (g *EntitlementGraph) IsEntitlementExpanded(entitlementID string) bool { - node := g.GetNode(entitlementID) - if node == nil { - // TODO: log error? return error? - return false - } - if !g.isNodeExpanded(node.Id) { - return false - } - return true -} - -// HasUnexpandedAncestors returns true if the given entitlement has ancestors that have not been expanded yet. -func (g *EntitlementGraph) HasUnexpandedAncestors(entitlementID string) bool { - node := g.GetNode(entitlementID) - if node == nil { - return false - } - - for _, ancestorId := range g.getAncestors(entitlementID) { - if !g.isNodeExpanded(ancestorId) { - return true - } - } - return false -} - -func (g *EntitlementGraph) getAncestors(entitlementID string) []int { - node := g.GetNode(entitlementID) - if node == nil { - panic("entitlement not found") - // return nil - } - - ancestors := make([]int, 0) - for src, dst := range g.Edges { - if _, ok := dst[node.Id]; ok { - ancestors = append(ancestors, src) - } - } - return ancestors -} - -// Find the direct ancestors of the given entitlement. The 'all' flag returns all ancestors regardless of 'done' state. -func (g *EntitlementGraph) GetCycles() ([][]int, bool) { - rv := make([][]int, 0) - for nodeID := range g.Nodes { - edges, ok := g.Edges[nodeID] - if !ok || len(edges) == 0 { - continue - } - cycle, isCycle := g.getCycle([]int{nodeID}) - if isCycle && !isInCycle(cycle, rv) { - rv = append(rv, cycle) - } - } - - return rv, len(rv) > 0 -} - -func isInCycle(newCycle []int, cycles [][]int) bool { - for _, cycle := range cycles { - if len(cycle) > 0 && reflect.DeepEqual(cycle, newCycle) { - return true - } - } - return false -} - -func shift(arr []int, n int) []int { - for i := 0; i < n; i++ { - arr = append(arr[1:], arr[0]) - } - return arr -} - -func (g *EntitlementGraph) getCycle(visits []int) ([]int, bool) { - if len(visits) == 0 { - return nil, false - } - nodeId := visits[len(visits)-1] - for descendantId := range g.Edges[nodeId] { - tempVisits := make([]int, len(visits)) - copy(tempVisits, visits) - if descendantId == visits[0] { - // shift array so that the smallest element is first - smallestIndex := 0 - for i := range tempVisits { - if tempVisits[i] < tempVisits[smallestIndex] { - smallestIndex = i - } - } - tempVisits = shift(tempVisits, smallestIndex) - return tempVisits, true - } - for _, visit := range visits { - if visit == descendantId { - return nil, false - } - } - - tempVisits = append(tempVisits, descendantId) - return g.getCycle(tempVisits) - } - return nil, false -} - -func (g *EntitlementGraph) GetDescendantEntitlements(entitlementID string) map[string]*grantInfo { - node := g.GetNode(entitlementID) - if node == nil { - return nil - } - entsToGrants := make(map[string]*grantInfo) - for dstNodeId, edgeInfo := range g.Edges[node.Id] { - dstNode := g.Nodes[dstNodeId] - for _, entId := range dstNode.EntitlementIDs { - entsToGrants[entId] = edgeInfo - } - } - return entsToGrants -} - -func (g *EntitlementGraph) HasEntitlement(entitlementID string) bool { - return g.GetNode(entitlementID) != nil -} - -func (g *EntitlementGraph) AddEntitlement(entitlement *v2.Entitlement) { - node := g.GetNode(entitlement.Id) - if node != nil { - return - } - - g.Nodes[g.NodeCount] = Node{ - Id: g.NodeCount, - 
EntitlementIDs: []string{entitlement.Id}, - } - g.NodeCount++ -} - -func (g *EntitlementGraph) GetEntitlements() []string { - var entitlements []string - for _, node := range g.Nodes { - entitlements = append(entitlements, node.EntitlementIDs...) - } - return entitlements -} - -func (g *EntitlementGraph) MarkEdgeExpanded(sourceEntitlementID string, descendantEntitlementID string) { - srcNode := g.GetNode(sourceEntitlementID) - if srcNode == nil { - // TODO: panic? - return - } - dstNode := g.GetNode(descendantEntitlementID) - if dstNode == nil { - // TODO: panic? - return - } - _, ok := g.Edges[srcNode.Id][dstNode.Id] - if !ok { - return - } - - g.Edges[srcNode.Id][dstNode.Id].Expanded = true -} - -func (g *EntitlementGraph) GetNode(entitlementId string) *Node { - // TODO: add an EntitlementToNode map for efficiency - for _, node := range g.Nodes { - for _, entId := range node.EntitlementIDs { - if entId == entitlementId { - return &node - } - } - } - return nil -} - -func (g *EntitlementGraph) AddEdge(ctx context.Context, srcEntitlementID string, dstEntitlementID string, shallow bool, resourceTypeIDs []string) error { - srcNode := g.GetNode(srcEntitlementID) - if srcNode == nil { - return ErrNoEntitlement - } - dstNode := g.GetNode(dstEntitlementID) - if dstNode == nil { - return ErrNoEntitlement - } - - _, ok := g.Edges[srcNode.Id] - if !ok { - g.Edges[srcNode.Id] = make(map[int]*grantInfo) - } - - _, ok = g.Edges[srcNode.Id][dstNode.Id] - if !ok { - g.Edges[srcNode.Id][dstNode.Id] = &grantInfo{ - Expanded: false, - Shallow: shallow, - ResourceTypeIDs: resourceTypeIDs, - } - } else { - // TODO: just do nothing? it's probably a mistake if we're adding the same edge twice - ctxzap.Extract(ctx).Warn( - "duplicate edge from datasource", - zap.String("src_entitlement_id", srcEntitlementID), - zap.String("dst_entitlement_id", dstEntitlementID), - zap.Bool("shallow", shallow), - zap.Strings("resource_type_ids", resourceTypeIDs), - ) - return nil - } - return nil -} - -func (g *EntitlementGraph) removeNode(nodeID int) { - delete(g.Nodes, nodeID) - delete(g.Edges, nodeID) - for id := range g.Edges { - delete(g.Edges[id], nodeID) - } -} - -func (g *EntitlementGraph) mergeNodes(node1ID int, node2ID int) { - node1 := g.Nodes[node1ID] - node2 := g.Nodes[node2ID] - - // Put node's entitlements on first node - node1.EntitlementIDs = append(node1.EntitlementIDs, node2.EntitlementIDs...) - - // TODO: Merge grant info? - - for dstNodeID := range g.Edges[node2ID] { - dstNode := g.Nodes[dstNodeID] - if dstNodeID == node1ID { - continue - } - - // Set outgoing edges on first node - for id := range g.Edges[dstNodeID] { - if id != node1.Id { - g.Edges[node1.Id][id] = g.Edges[dstNode.Id][id] - } - } - } - - // Set incoming edges on first node - for srcID, edges := range g.Edges { - for dstID, gi := range edges { - if dstID == node2ID && srcID != node1ID { - g.Edges[srcID][node1ID] = gi - delete(g.Edges[srcID], dstID) - } - } - } - - // Delete node - g.removeNode(node2ID) -} - -func (g *EntitlementGraph) FixCycles() error { - // If we can't fix the cycles in 10 tries, just give up - const maxTries = 10 - for i := 0; i < maxTries; i++ { - cycles, hasCycles := g.GetCycles() - if !hasCycles { - return nil - } - - // Merge all the nodes in a cycle. 
- for i := 1; i < len(cycles[0]); i++ { - g.mergeNodes(cycles[0][0], cycles[0][i]) - } - } - return fmt.Errorf("could not fix cycles after %v tries", maxTries) -} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/cycle.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/cycle.go new file mode 100644 index 0000000..bea170c --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/cycle.go @@ -0,0 +1,213 @@ +package expand + +import ( + mapset "github.com/deckarep/golang-set/v2" +) + +// GetFirstCycle given an entitlements graph, return a cycle by node ID if it +// exists. Returns nil if no cycle exists. If there is a single +// node pointing to itself, that will count as a cycle. +func (g *EntitlementGraph) GetFirstCycle() []int { + visited := mapset.NewSet[int]() + for nodeID := range g.Nodes { + cycle, hasCycle := g.cycleDetectionHelper(nodeID, visited, []int{}) + if hasCycle { + return cycle + } + } + + return nil +} + +func (g *EntitlementGraph) cycleDetectionHelper( + nodeID int, + visited mapset.Set[int], + currentCycle []int, +) ([]int, bool) { + visited.Add(nodeID) + if destinations, ok := g.SourcesToDestinations[nodeID]; ok { + for destinationID := range destinations { + nextCycle := make([]int, len(currentCycle)) + copy(nextCycle, currentCycle) + nextCycle = append(nextCycle, nodeID) + + if !visited.Contains(destinationID) { + if cycle, hasCycle := g.cycleDetectionHelper(destinationID, visited, nextCycle); hasCycle { + return cycle, true + } + } else { + // Make sure to not include part of the start before the cycle. + outputCycle := make([]int, 0) + for i := len(nextCycle) - 1; i >= 0; i-- { + outputCycle = append(outputCycle, nextCycle[i]) + if nextCycle[i] == destinationID { + return outputCycle, true + } + } + } + } + } + return nil, false +} + +// removeNode obliterates a node and all incoming/outgoing edges. +func (g *EntitlementGraph) removeNode(nodeID int) { + // Delete from reverse mapping. + if node, ok := g.Nodes[nodeID]; ok { + for _, entitlementID := range node.EntitlementIDs { + entNodeId, ok := g.EntitlementsToNodes[entitlementID] + if ok && entNodeId == nodeID { + delete(g.EntitlementsToNodes, entitlementID) + } + } + } + + // Delete from nodes list. + delete(g.Nodes, nodeID) + + // Delete all outgoing edges. + if destinations, ok := g.SourcesToDestinations[nodeID]; ok { + for destinationID, edgeID := range destinations { + delete(g.DestinationsToSources[destinationID], nodeID) + delete(g.Edges, edgeID) + } + } + delete(g.SourcesToDestinations, nodeID) + + // Delete all incoming edges. + if sources, ok := g.DestinationsToSources[nodeID]; ok { + for sourceID, edgeID := range sources { + delete(g.SourcesToDestinations[sourceID], nodeID) + delete(g.Edges, edgeID) + } + } + delete(g.SourcesToDestinations, nodeID) +} + +// FixCycles if any cycles of nodes exist, merge all nodes in that cycle into a +// single node and then repeat. Iteration ends when there are no more cycles. +func (g *EntitlementGraph) FixCycles() error { + cycle := g.GetFirstCycle() + if cycle == nil { + return nil + } + + if err := g.fixCycle(cycle); err != nil { + return err + } + + // Recurse! + return g.FixCycles() +} + +// fixCycle takes a list of Node IDs that form a cycle and merges them into a +// single, new node. 
+func (g *EntitlementGraph) fixCycle(nodeIDs []int) error { + entitlementIDs := mapset.NewSet[string]() + outgoingEdgesToResourceTypeIDs := map[int]mapset.Set[string]{} + incomingEdgesToResourceTypeIDs := map[int]mapset.Set[string]{} + for _, nodeID := range nodeIDs { + if node, ok := g.Nodes[nodeID]; ok { + // Gather entitlements. + for _, entitlementID := range node.EntitlementIDs { + entitlementIDs.Add(entitlementID) + } + + // Gather all incoming edges. + if sources, ok := g.DestinationsToSources[nodeID]; ok { + for sourceNodeID, edgeID := range sources { + if edge, ok := g.Edges[edgeID]; ok { + resourceTypeIDs, ok := incomingEdgesToResourceTypeIDs[sourceNodeID] + if !ok { + resourceTypeIDs = mapset.NewSet[string]() + } + for _, resourceTypeID := range edge.ResourceTypeIDs { + resourceTypeIDs.Add(resourceTypeID) + } + incomingEdgesToResourceTypeIDs[sourceNodeID] = resourceTypeIDs + } + } + } + + // Gather all outgoing edges. + if destinations, ok := g.SourcesToDestinations[nodeID]; ok { + for destinationNodeID, edgeID := range destinations { + if edge, ok := g.Edges[edgeID]; ok { + resourceTypeIDs, ok := outgoingEdgesToResourceTypeIDs[destinationNodeID] + if !ok { + resourceTypeIDs = mapset.NewSet[string]() + } + for _, resourceTypeID := range edge.ResourceTypeIDs { + resourceTypeIDs.Add(resourceTypeID) + } + outgoingEdgesToResourceTypeIDs[destinationNodeID] = resourceTypeIDs + } + } + } + } + } + + // Create a new node with the entitlements. + g.NextNodeID++ + node := Node{ + Id: g.NextNodeID, + EntitlementIDs: entitlementIDs.ToSlice(), + } + g.Nodes[node.Id] = node + for entitlementID := range entitlementIDs.Iter() { + // Break the old connections and point to this node. + g.EntitlementsToNodes[entitlementID] = node.Id + } + + // Hook up edges + for destinationID, resourceTypeIDs := range outgoingEdgesToResourceTypeIDs { + g.NextEdgeID++ + edge := Edge{ + EdgeID: g.NextEdgeID, + SourceID: node.Id, + DestinationID: destinationID, + IsExpanded: false, + IsShallow: false, + ResourceTypeIDs: resourceTypeIDs.ToSlice(), + } + g.Edges[edge.EdgeID] = edge + if _, ok := g.SourcesToDestinations[node.Id]; !ok { + g.SourcesToDestinations[node.Id] = make(map[int]int) + } + g.SourcesToDestinations[node.Id][destinationID] = edge.EdgeID + if _, ok := g.DestinationsToSources[destinationID]; !ok { + g.DestinationsToSources[destinationID] = make(map[int]int) + } + g.DestinationsToSources[destinationID][node.Id] = edge.EdgeID + } + for sourceID, resourceTypeIDs := range incomingEdgesToResourceTypeIDs { + g.NextEdgeID++ + edge := Edge{ + EdgeID: g.NextEdgeID, + SourceID: sourceID, + DestinationID: node.Id, + IsExpanded: false, + IsShallow: false, + ResourceTypeIDs: resourceTypeIDs.ToSlice(), + } + g.Edges[edge.EdgeID] = edge + + if _, ok := g.SourcesToDestinations[sourceID]; !ok { + g.SourcesToDestinations[sourceID] = make(map[int]int) + } + g.SourcesToDestinations[sourceID][node.Id] = edge.EdgeID + + if _, ok := g.DestinationsToSources[node.Id]; !ok { + g.DestinationsToSources[node.Id] = make(map[int]int) + } + g.DestinationsToSources[node.Id][sourceID] = edge.EdgeID + } + + // Call delete to delete the node and every associated edge. This will + // conveniently delete all edges that were internal to the cycle. 
+ for _, nodeID := range nodeIDs { + g.removeNode(nodeID) + } + + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/graph.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/graph.go new file mode 100644 index 0000000..dbdce81 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/graph.go @@ -0,0 +1,283 @@ +package expand + +import ( + "context" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" +) + +type EntitlementGraphAction struct { + SourceEntitlementID string `json:"source_entitlement_id"` + DescendantEntitlementID string `json:"descendant_entitlement_id"` + Shallow bool `json:"shallow"` + ResourceTypeIDs []string `json:"resource_types_ids"` + PageToken string `json:"page_token"` +} + +type Edge struct { + EdgeID int `json:"edge_id"` + SourceID int `json:"source_id"` + DestinationID int `json:"destination_id"` + IsExpanded bool `json:"expanded"` + IsShallow bool `json:"shallow"` + ResourceTypeIDs []string `json:"resource_type_ids"` +} + +// Node represents a list of entitlements. It is the base element of the graph. +type Node struct { + Id int `json:"id"` + EntitlementIDs []string `json:"entitlementIds"` // List of entitlements. +} + +// EntitlementGraph - a directed graph representing the relationships between +// entitlements and grants. This data structure is naïve to any business logic. +// Note that the data of each Node is actually a list or IDs, not a single ID. +// This is because the graph can have cycles, and we address them by reducing +// _all_ nodes in a cycle into a single node. +type EntitlementGraph struct { + NextNodeID int `json:"next_node_id"` // Automatically incremented so that each node has a unique ID. + NextEdgeID int `json:"next_edge_id"` // Automatically incremented so that each edge has a unique ID. + Nodes map[int]Node `json:"nodes"` // The mapping of all node IDs to nodes. + EntitlementsToNodes map[string]int `json:"entitlements_to_nodes"` // Internal mapping of entitlements to nodes for quicker lookup. + SourcesToDestinations map[int]map[int]int `json:"sources_to_destinations"` // Internal mapping of outgoing edges by node ID. + DestinationsToSources map[int]map[int]int `json:"destinations_to_sources"` // Internal mapping of incoming edges by node ID. + Edges map[int]Edge `json:"edges"` // Adjacency list. Source node -> descendant node + Loaded bool `json:"loaded"` + Depth int `json:"depth"` + Actions []EntitlementGraphAction `json:"actions"` +} + +func NewEntitlementGraph(_ context.Context) *EntitlementGraph { + return &EntitlementGraph{ + DestinationsToSources: make(map[int]map[int]int), + Edges: make(map[int]Edge), + EntitlementsToNodes: make(map[string]int), + Nodes: make(map[int]Node), + SourcesToDestinations: make(map[int]map[int]int), + } +} + +// isNodeExpanded - is every outgoing edge from this node Expanded? +func (g *EntitlementGraph) isNodeExpanded(nodeID int) bool { + for _, edgeID := range g.SourcesToDestinations[nodeID] { + if edge, ok := g.Edges[edgeID]; ok { + if !edge.IsExpanded { + return false + } + } + } + return true +} + +// IsExpanded returns true if all entitlements in the graph have been expanded. +func (g *EntitlementGraph) IsExpanded() bool { + for _, edge := range g.Edges { + if !edge.IsExpanded { + return false + } + } + return true +} + +// IsEntitlementExpanded returns true if all the outgoing edges for the given entitlement have been expanded. 
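Taken together, the new expand package replaces the recursive cycle handling deleted from sync/expand.go: GetFirstCycle runs a depth-first search from each node, and FixCycles keeps collapsing whichever cycle it finds into a single merged node until none remain. A small caller-side sketch (the entitlement IDs are invented):

```go
// Sketch: how a cycle collapses during grant expansion, using the expand
// package added above. Entitlement IDs are made up for the example.
package main

import (
	"context"
	"fmt"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"github.com/conductorone/baton-sdk/pkg/sync/expand"
)

func main() {
	ctx := context.Background()
	g := expand.NewEntitlementGraph(ctx)

	// Two entitlements that grant each other form a 2-node cycle.
	g.AddEntitlement(&v2.Entitlement{Id: "group:a:member"})
	g.AddEntitlement(&v2.Entitlement{Id: "group:b:member"})
	_ = g.AddEdge(ctx, "group:a:member", "group:b:member", false, nil)
	_ = g.AddEdge(ctx, "group:b:member", "group:a:member", false, nil)

	fmt.Println(g.GetFirstCycle()) // non-nil: the cycle was detected

	if err := g.FixCycles(); err != nil {
		panic(err)
	}
	fmt.Println(g.GetFirstCycle())    // nil: no cycles remain
	fmt.Println(g.GetEntitlements()) // both IDs, now held by one merged node
}
```

After the merge, both entitlement IDs resolve to the same node and the edges that were internal to the cycle are gone, which is what lets FixCycles terminate.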
+func (g *EntitlementGraph) IsEntitlementExpanded(entitlementID string) bool { + node := g.GetNode(entitlementID) + if node == nil { + // TODO: log error? return error? + return false + } + return g.isNodeExpanded(node.Id) +} + +// HasUnexpandedAncestors returns true if the given entitlement has ancestors that have not been expanded yet. +func (g *EntitlementGraph) HasUnexpandedAncestors(entitlementID string) bool { + node := g.GetNode(entitlementID) + if node == nil { + return false + } + + for _, ancestorId := range g.getParents(entitlementID) { + if !g.isNodeExpanded(ancestorId) { + return true + } + } + return false +} + +// getParents gets _all_ IDs of nodes that have this entitlement as a child. +func (g *EntitlementGraph) getParents(entitlementID string) []int { + node := g.GetNode(entitlementID) + if node == nil { + panic("entitlement not found") + } + + parents := make([]int, 0) + if destinations, ok := g.DestinationsToSources[node.Id]; ok { + for id := range destinations { + parents = append(parents, id) + } + } + return parents +} + +// GetDescendantEntitlements given an entitlementID, return a mapping of child +// entitlementIDs to edge data. +func (g *EntitlementGraph) GetDescendantEntitlements(entitlementID string) map[string]*Edge { + node := g.GetNode(entitlementID) + if node == nil { + return nil + } + entitlementsToEdges := make(map[string]*Edge) + if destinations, ok := g.SourcesToDestinations[node.Id]; ok { + for destinationID, edgeID := range destinations { + if destination, ok := g.Nodes[destinationID]; ok { + for _, entitlementID := range destination.EntitlementIDs { + if edge, ok := g.Edges[edgeID]; ok { + entitlementsToEdges[entitlementID] = &edge + } + } + } + } + } + return entitlementsToEdges +} + +func (g *EntitlementGraph) HasEntitlement(entitlementID string) bool { + return g.GetNode(entitlementID) != nil +} + +// AddEntitlement - add an entitlement's ID as an unconnected node in the graph. +func (g *EntitlementGraph) AddEntitlement(entitlement *v2.Entitlement) { + // If the entitlement is already in the graph, fail silently. + found := g.GetNode(entitlement.Id) + if found != nil { + return + } + + // Start at 1 in case we don't initialize something and try to get node 0. + g.NextNodeID++ + + // Create a new node. + node := Node{ + Id: g.NextNodeID, + EntitlementIDs: []string{entitlement.Id}, + } + + // Add the node to the data structures. + g.Nodes[node.Id] = node + g.EntitlementsToNodes[entitlement.Id] = node.Id +} + +// GetEntitlements returns a combined list of _all_ entitlements from all nodes. +func (g *EntitlementGraph) GetEntitlements() []string { + var entitlements []string + for _, node := range g.Nodes { + entitlements = append(entitlements, node.EntitlementIDs...) + } + return entitlements +} + +// MarkEdgeExpanded given source and destination entitlements, mark the edge +// between them as "expanded". +func (g *EntitlementGraph) MarkEdgeExpanded(sourceEntitlementID string, descendantEntitlementID string) { + srcNode := g.GetNode(sourceEntitlementID) + if srcNode == nil { + // TODO: panic? + return + } + dstNode := g.GetNode(descendantEntitlementID) + if dstNode == nil { + // TODO: panic? + return + } + + if destinations, ok := g.SourcesToDestinations[srcNode.Id]; ok { + if edgeID, ok := destinations[dstNode.Id]; ok { + if edge, ok := g.Edges[edgeID]; ok { + edge.IsExpanded = true + g.Edges[edgeID] = edge + } + } + } +} + +// GetNode - returns the node that contains the given `entitlementID`. 
+func (g *EntitlementGraph) GetNode(entitlementID string) *Node { + nodeID, ok := g.EntitlementsToNodes[entitlementID] + if !ok { + return nil + } + node, ok := g.Nodes[nodeID] + if !ok { + return nil + } + return &node +} + +// AddEdge - given two entitlements, add an edge with resourceTypeIDs. +func (g *EntitlementGraph) AddEdge( + ctx context.Context, + srcEntitlementID string, + dstEntitlementID string, + isShallow bool, + resourceTypeIDs []string, +) error { + srcNode := g.GetNode(srcEntitlementID) + if srcNode == nil { + return ErrNoEntitlement + } + dstNode := g.GetNode(dstEntitlementID) + if dstNode == nil { + return ErrNoEntitlement + } + + if destinations, ok := g.SourcesToDestinations[srcNode.Id]; ok { + if _, ok = destinations[dstNode.Id]; ok { + // TODO: just do nothing? it's probably a mistake if we're adding the same edge twice + ctxzap.Extract(ctx).Warn( + "duplicate edge from datasource", + zap.String("src_entitlement_id", srcEntitlementID), + zap.String("dst_entitlement_id", dstEntitlementID), + zap.Bool("shallow", isShallow), + zap.Strings("resource_type_ids", resourceTypeIDs), + ) + return nil + } + } else { + g.SourcesToDestinations[srcNode.Id] = make(map[int]int) + } + + if sources, ok := g.DestinationsToSources[dstNode.Id]; ok { + if _, ok = sources[srcNode.Id]; ok { + // TODO: just do nothing? it's probably a mistake if we're adding the same edge twice + ctxzap.Extract(ctx).Warn( + "duplicate edge from datasource", + zap.String("src_entitlement_id", srcEntitlementID), + zap.String("dst_entitlement_id", dstEntitlementID), + zap.Bool("shallow", isShallow), + zap.Strings("resource_type_ids", resourceTypeIDs), + ) + return nil + } + } else { + g.DestinationsToSources[dstNode.Id] = make(map[int]int) + } + + // Start at 1 in case we don't initialize something and try to get edge 0. + g.NextEdgeID++ + edge := Edge{ + EdgeID: g.NextEdgeID, + SourceID: srcNode.Id, + DestinationID: dstNode.Id, + IsExpanded: false, + IsShallow: isShallow, + ResourceTypeIDs: resourceTypeIDs, + } + + g.Edges[g.NextEdgeID] = edge + g.SourcesToDestinations[srcNode.Id][dstNode.Id] = edge.EdgeID + g.DestinationsToSources[dstNode.Id][srcNode.Id] = edge.EdgeID + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/validate.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/validate.go new file mode 100644 index 0000000..fdb94e8 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/validate.go @@ -0,0 +1,134 @@ +package expand + +import ( + "errors" + "fmt" + "slices" + "strings" +) + +var ( + ErrNoEntitlement = errors.New("no entitlement found") +) + +func (node *Node) Str() string { + return fmt.Sprintf( + "node %v entitlement IDs: %v", + node.Id, + node.EntitlementIDs, + ) +} + +func (edge *Edge) Str() string { + return fmt.Sprintf( + "%v -> %v { expanded: %v, shallow: %v, resources: %v }", + edge.SourceID, + edge.DestinationID, + edge.IsExpanded, + edge.IsShallow, + edge.ResourceTypeIDs, + ) +} + +// Str lists every `node` line by line followed by every `edge`. Useful for debugging. 
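graph.go above keeps three indexes that must stay consistent: Edges keyed by edge ID, the SourcesToDestinations and DestinationsToSources adjacency maps (node ID to node ID to edge ID), and EntitlementsToNodes for constant-time GetNode lookups. Continuing the two-entitlement sketch from earlier, before FixCycles runs, this fragment shows the same edge being reachable from either direction (callers normally go through GetDescendantEntitlements rather than touching the maps directly):

```go
// Fragment continuing the graph built in the previous sketch.
src := g.GetNode("group:a:member") // node holding the source entitlement
dst := g.GetNode("group:b:member")

edgeID := g.SourcesToDestinations[src.Id][dst.Id]
_ = g.DestinationsToSources[dst.Id][src.Id] // reverse index: same edge ID

edge := g.Edges[edgeID]
fmt.Println(edge.SourceID == src.Id, edge.DestinationID == dst.Id, edge.IsExpanded)

// GetDescendantEntitlements wraps this lookup, mapping each child
// entitlement ID to its *Edge.
for id, e := range g.GetDescendantEntitlements("group:a:member") {
	fmt.Println(id, e.IsShallow, e.ResourceTypeIDs)
}
```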
+func (g *EntitlementGraph) Str() string { + nodeHeader := "" + edgeHeader := "edges:" + nodesStrings := make([]string, 0, len(g.Nodes)) + edgeStrings := make([]string, 0, len(g.Edges)) + + for id, node := range g.Nodes { + nodesStrings = append( + nodesStrings, + node.Str(), + ) + if destinationsMap, destinationOK := g.SourcesToDestinations[id]; destinationOK { + for _, edgeID := range destinationsMap { + if edge, edgeOK := g.Edges[edgeID]; edgeOK { + edgeStrings = append( + edgeStrings, + edge.Str(), + ) + } + } + } + } + + return strings.Join( + []string{ + nodeHeader, + strings.Join(nodesStrings, "\n"), + edgeHeader, + strings.Join(edgeStrings, "\n"), + }, + "\n", + ) +} + +// validateEdges validates that for every edge, both nodes actually exists. +func (g *EntitlementGraph) validateEdges() error { + for edgeId, edge := range g.Edges { + if _, ok := g.Nodes[edge.SourceID]; !ok { + return ErrNoEntitlement + } + if _, ok := g.Nodes[edge.DestinationID]; !ok { + return ErrNoEntitlement + } + + if g.SourcesToDestinations[edge.SourceID][edge.DestinationID] != edgeId { + return fmt.Errorf("edge %v does not match source %v to destination %v", edgeId, edge.SourceID, edge.DestinationID) + } + + if g.DestinationsToSources[edge.DestinationID][edge.SourceID] != edgeId { + return fmt.Errorf("edge %v does not match destination %v to source %v", edgeId, edge.DestinationID, edge.SourceID) + } + } + return nil +} + +// validateNodes validates that each node has at least one `entitlementID` and +// that each `entitlementID` only appears once in the graph. +func (g *EntitlementGraph) validateNodes() error { + // check for entitlement ids that are in multiple nodes + seenEntitlements := make(map[string]int) + for nodeID, node := range g.Nodes { + if len(node.EntitlementIDs) == 0 { + return fmt.Errorf("empty node %v", nodeID) + } + for _, entID := range node.EntitlementIDs { + if _, ok := seenEntitlements[entID]; ok { + return fmt.Errorf("entitlement %v is in multiple nodes: %v %v", entID, nodeID, seenEntitlements[entID]) + } + seenEntitlements[entID] = nodeID + entNodeId, ok := g.EntitlementsToNodes[entID] + if !ok { + return fmt.Errorf("entitlement %v is not in EntitlementsToNodes. should be in node %v", entID, nodeID) + } + if entNodeId != nodeID { + return fmt.Errorf("entitlement %v is in node %v but should be in node %v", entID, entNodeId, nodeID) + } + } + } + + for entID, nodeID := range g.EntitlementsToNodes { + node, ok := g.Nodes[nodeID] + if !ok { + return fmt.Errorf("entitlement %v is in EntitlementsToNodes but not in Nodes", entID) + } + if !slices.Contains(node.EntitlementIDs, entID) { + return fmt.Errorf("entitlement %v is in EntitlementsToNodes but not in node %v", entID, nodeID) + } + } + return nil +} + +// Validate checks every node and edge and returns an error if the graph is not valid. 
+func (g *EntitlementGraph) Validate() error { + if err := g.validateEdges(); err != nil { + return err + } + if err := g.validateNodes(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go index 60674f1..df651a4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go @@ -6,6 +6,8 @@ import ( "fmt" "sync" + "github.com/conductorone/baton-sdk/pkg/sync/expand" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "go.uber.org/zap" ) @@ -16,7 +18,7 @@ type State interface { NextPage(ctx context.Context, pageToken string) error ResourceTypeID(ctx context.Context) string ResourceID(ctx context.Context) string - EntitlementGraph(ctx context.Context) *EntitlementGraph + EntitlementGraph(ctx context.Context) *expand.EntitlementGraph ParentResourceID(ctx context.Context) string ParentResourceTypeID(ctx context.Context) string PageToken(ctx context.Context) string @@ -52,12 +54,12 @@ func (s ActionOp) String() string { } } -// MarshalJSON marshals the ActionOp insto a json string. +// MarshalJSON marshals the ActionOp into a json string. func (s *ActionOp) MarshalJSON() ([]byte, error) { return json.Marshal(s.String()) } -// UnmarshalJSON unmarshal's the input byte slice and updates this action op. +// UnmarshalJSON unmarshals the input byte slice and updates this action op. func (s *ActionOp) UnmarshalJSON(data []byte) error { var v string err := json.Unmarshal(data, &v) @@ -118,21 +120,21 @@ type state struct { mtx sync.RWMutex actions []Action currentAction *Action - entitlementGraph *EntitlementGraph + entitlementGraph *expand.EntitlementGraph needsExpansion bool } // serializedToken is used to serialize the token to JSON. This separate object is used to avoid having exported fields // on the object used externally. We should interface this, probably. type serializedToken struct { - Actions []Action `json:"actions"` - CurrentAction *Action `json:"current_action"` - NeedsExpansion bool `json:"needs_expansion"` - EntitlementGraph *EntitlementGraph `json:"entitlement_graph"` + Actions []Action `json:"actions"` + CurrentAction *Action `json:"current_action"` + NeedsExpansion bool `json:"needs_expansion"` + EntitlementGraph *expand.EntitlementGraph `json:"entitlement_graph"` } // push adds a new action to the stack. If there is no current state, the action is directly set to current, else -// the current state is appened to the slice of actions, and the new action is set to current. +// the current state is appended to the slice of actions, and the new action is set to current. func (st *state) push(action Action) { st.mtx.Lock() defer st.mtx.Unlock() @@ -291,13 +293,13 @@ func (st *state) ResourceID(ctx context.Context) string { } // EntitlementGraph returns the entitlement graph for the current action. 
-func (st *state) EntitlementGraph(ctx context.Context) *EntitlementGraph { +func (st *state) EntitlementGraph(ctx context.Context) *expand.EntitlementGraph { c := st.Current() if c == nil { panic("no current state") } if st.entitlementGraph == nil { - st.entitlementGraph = NewEntitlementGraph(ctx) + st.entitlementGraph = expand.NewEntitlementGraph(ctx) } return st.entitlementGraph } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go index fa0958b..fc4febc 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go @@ -11,6 +11,8 @@ import ( "strconv" "time" + "github.com/conductorone/baton-sdk/pkg/sync/expand" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -80,18 +82,27 @@ func (s *syncer) handleProgress(ctx context.Context, a *Action, c int) { } } +var attempts = 0 + func shouldWaitAndRetry(ctx context.Context, err error) bool { + if err == nil { + attempts = 0 + return true + } if status.Code(err) != codes.Unavailable { return false } + attempts++ l := ctxzap.Extract(ctx) - l.Error("retrying operation", zap.Error(err)) + + var wait time.Duration = time.Duration(attempts) * time.Second + + l.Error("retrying operation", zap.Error(err), zap.Duration("wait", wait)) for { select { - // TODO: this should back off based on error counts - case <-time.After(1 * time.Second): + case <-time.After(wait): return true case <-ctx.Done(): return false @@ -101,8 +112,8 @@ func shouldWaitAndRetry(ctx context.Context, err error) bool { // Sync starts the syncing process. The sync process is driven by the action stack that is part of the state object. // For each page of data that is required to be fetched from the connector, a new action is pushed on to the stack. Once -// an action is completed, it is popped off of the queue. Before procesing each action, we checkpoint the state object -// into the datasouce. This allows for graceful resumes if a sync is interrupted. +// an action is completed, it is popped off of the queue. Before processing each action, we checkpoint the state object +// into the datasource. This allows for graceful resumes if a sync is interrupted. 
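shouldWaitAndRetry above now accepts a nil error (returning true and resetting the package-level attempt counter) and sleeps attempts * 1 second on gRPC Unavailable errors, which is why the Sync loop below calls it unconditionally instead of only when err != nil. An illustrative caller pattern, with op standing in for one of the Sync* methods used by that loop:

```go
// Sketch of the caller pattern for the retry helper above; op is a stand-in
// for one of the syncer's Sync* methods and is not part of the SDK.
func runWithRetry(ctx context.Context, op func(context.Context) error) error {
	for {
		err := op(ctx)
		if !shouldWaitAndRetry(ctx, err) {
			// Either a non-retryable error, or the context was cancelled
			// while waiting out an Unavailable error.
			return err
		}
		if err == nil {
			return nil // success; the attempt counter was reset
		}
		// err was codes.Unavailable: shouldWaitAndRetry already slept
		// 1s, 2s, 3s, ... so just try again.
	}
}
```

Note that the counter is a package-level var shared across operations, so the backoff grows per process rather than per action.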
func (s *syncer) Sync(ctx context.Context) error { l := ctxzap.Extract(ctx) @@ -189,28 +200,28 @@ func (s *syncer) Sync(ctx context.Context) error { case SyncResourceTypesOp: err = s.SyncResourceTypes(ctx) - if err != nil && !shouldWaitAndRetry(ctx, err) { + if !shouldWaitAndRetry(ctx, err) { return err } continue case SyncResourcesOp: err = s.SyncResources(ctx) - if err != nil && !shouldWaitAndRetry(ctx, err) { + if !shouldWaitAndRetry(ctx, err) { return err } continue case SyncEntitlementsOp: err = s.SyncEntitlements(ctx) - if err != nil && !shouldWaitAndRetry(ctx, err) { + if !shouldWaitAndRetry(ctx, err) { return err } continue case SyncGrantsOp: err = s.SyncGrants(ctx) - if err != nil && !shouldWaitAndRetry(ctx, err) { + if !shouldWaitAndRetry(ctx, err) { return err } continue @@ -230,7 +241,7 @@ func (s *syncer) Sync(ctx context.Context) error { } err = s.SyncGrantExpansion(ctx) - if err != nil && !shouldWaitAndRetry(ctx, err) { + if !shouldWaitAndRetry(ctx, err) { return err } continue @@ -273,11 +284,9 @@ func (s *syncer) SyncResourceTypes(ctx context.Context) error { return err } - for _, rt := range resp.List { - err = s.store.PutResourceType(ctx, rt) - if err != nil { - return err - } + err = s.store.PutResourceTypes(ctx, resp.List...) + if err != nil { + return err } s.handleProgress(ctx, s.state.Current(), len(resp.List)) @@ -382,6 +391,7 @@ func (s *syncer) syncResources(ctx context.Context) error { } } + bulkPutResoruces := []*v2.Resource{} for _, r := range resp.List { // Check if we've already synced this resource, skip it if we have _, err = s.store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ @@ -403,12 +413,16 @@ func (s *syncer) syncResources(ctx context.Context) error { // Set the resource creation source r.CreationSource = v2.Resource_CREATION_SOURCE_CONNECTOR_LIST_RESOURCES - err = s.store.PutResource(ctx, r) + bulkPutResoruces = append(bulkPutResoruces, r) + + err = s.getSubResources(ctx, r) if err != nil { return err } + } - err = s.getSubResources(ctx, r) + if len(bulkPutResoruces) > 0 { + err = s.store.PutResources(ctx, bulkPutResoruces...) if err != nil { return err } @@ -480,7 +494,7 @@ func (s *syncer) shouldSkipEntitlementsAndGrants(ctx context.Context, r *v2.Reso } // SyncEntitlements fetches the entitlements from the connector. It first lists each resource from the datastore, -// and pushes an action to fetch the entitelments for each resource. +// and pushes an action to fetch the entitlements for each resource. func (s *syncer) SyncEntitlements(ctx context.Context) error { if s.state.ResourceTypeID(ctx) == "" && s.state.ResourceID(ctx) == "" { pageToken := s.state.PageToken(ctx) @@ -548,11 +562,9 @@ func (s *syncer) syncEntitlementsForResource(ctx context.Context, resourceID *v2 if err != nil { return err } - for _, e := range resp.List { - err = s.store.PutEntitlement(ctx, e) - if err != nil { - return err - } + err = s.store.PutEntitlements(ctx, resp.List...) 
+ if err != nil { + return err } s.handleProgress(ctx, s.state.Current(), len(resp.List)) @@ -810,9 +822,13 @@ func (s *syncer) SyncGrantExpansion(ctx context.Context) error { } if entitlementGraph.Loaded { - cycles, hasCycles := entitlementGraph.GetCycles() - if hasCycles { - l.Warn("cycles detected in entitlement graph", zap.Any("cycles", cycles)) + cycle := entitlementGraph.GetFirstCycle() + if cycle != nil { + l.Warn( + "cycle detected in entitlement graph", + zap.Any("cycle", cycle), + zap.Any("initial graph", entitlementGraph), + ) if dontFixCycles { return fmt.Errorf("cycles detected in entitlement graph") } @@ -883,7 +899,7 @@ func (s *syncer) SyncGrants(ctx context.Context) error { return nil } -type lastestSyncFetcher interface { +type latestSyncFetcher interface { LatestFinishedSync(ctx context.Context) (string, error) } @@ -893,7 +909,7 @@ func (s *syncer) fetchResourceForPreviousSync(ctx context.Context, resourceID *v var previousSyncID string var err error - if psf, ok := s.store.(lastestSyncFetcher); ok { + if psf, ok := s.store.(latestSyncFetcher); ok { previousSyncID, err = psf.LatestFinishedSync(ctx) if err != nil { return "", nil, err @@ -1050,11 +1066,10 @@ func (s *syncer) syncGrantsForResource(ctx context.Context, resourceID *v2.Resou if grantAnnos.Contains(&v2.GrantExpandable{}) { s.state.SetNeedsExpansion() } - - err = s.store.PutGrant(ctx, grant) - if err != nil { - return err - } + } + err = s.store.PutGrants(ctx, grants...) + if err != nil { + return err } s.handleProgress(ctx, s.state.Current(), len(grants)) @@ -1080,7 +1095,7 @@ func (s *syncer) syncGrantsForResource(ctx context.Context, resourceID *v2.Resou if updatedETag != nil { resourceAnnos.Update(updatedETag) resource.Annotations = resourceAnnos - err = s.store.PutResource(ctx, resource) + err = s.store.PutResources(ctx, resource) if err != nil { return err } @@ -1246,7 +1261,7 @@ func (s *syncer) runGrantExpandActions(ctx context.Context) (bool, error) { zap.Any("sources", sources), ) - err = s.store.PutGrant(ctx, descendantGrant) + err = s.store.PutGrants(ctx, descendantGrant) if err != nil { l.Error("runGrantExpandActions: error updating descendant grant", zap.Error(err)) return false, fmt.Errorf("runGrantExpandActions: error updating descendant grant: %w", err) @@ -1300,13 +1315,17 @@ func (s *syncer) expandGrantsForEntitlements(ctx context.Context) error { } if graph.Depth > maxDepth { - l.Error("expandGrantsForEntitlements: exceeded max depth", zap.Any("graph", graph), zap.Int("max_depth", maxDepth)) + l.Error( + "expandGrantsForEntitlements: exceeded max depth", + zap.Any("graph", graph), + zap.Int("max_depth", maxDepth), + ) s.state.FinishAction(ctx) return fmt.Errorf("exceeded max depth") } - // TOOD(morgabra) Yield here after some amount of work? - // traverse edges or call some sort of getentitlements + // TODO(morgabra) Yield here after some amount of work? + // traverse edges or call some sort of getEntitlements for _, sourceEntitlementID := range graph.GetEntitlements() { // We've already expanded this entitlement, so skip it. 
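The syncer hunks above also switch store writes from one call per object to the variadic batch helpers (PutResourceTypes, PutResources, PutEntitlements, PutGrants). The shape of that change, sketched for resources; resp.List and the syncer receiver are as in the hunk above, and the slice name here is illustrative:

```go
// Fragment: collect a page of resources and write them in one batched call
// instead of one store write per item.
bulk := make([]*v2.Resource, 0, len(resp.List))
for _, r := range resp.List {
	r.CreationSource = v2.Resource_CREATION_SOURCE_CONNECTOR_LIST_RESOURCES
	bulk = append(bulk, r)
}
if len(bulk) > 0 {
	if err := s.store.PutResources(ctx, bulk...); err != nil {
		return err
	}
}
// Entitlements and grants follow the same shape:
//   s.store.PutEntitlements(ctx, resp.List...)
//   s.store.PutGrants(ctx, grants...)
```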
if graph.IsEntitlementExpanded(sourceEntitlementID) { @@ -1320,14 +1339,14 @@ func (s *syncer) expandGrantsForEntitlements(ctx context.Context) error { } for descendantEntitlementID, grantInfo := range graph.GetDescendantEntitlements(sourceEntitlementID) { - if grantInfo.Expanded { + if grantInfo.IsExpanded { continue } - graph.Actions = append(graph.Actions, EntitlementGraphAction{ + graph.Actions = append(graph.Actions, expand.EntitlementGraphAction{ SourceEntitlementID: sourceEntitlementID, DescendantEntitlementID: descendantEntitlementID, PageToken: "", - Shallow: grantInfo.Shallow, + Shallow: grantInfo.IsShallow, ResourceTypeIDs: grantInfo.ResourceTypeIDs, }) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_ticket.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_ticket.go index 27e4596..b4af952 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_ticket.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_ticket.go @@ -29,8 +29,8 @@ func (c *createTicketTaskHandler) HandleTask(ctx context.Context) error { t := c.task.GetCreateTicketTask() if t == nil || t.GetTicketRequest() == nil { - l.Error("create ticket task was nil or missing ticket request", zap.Any("create_resource_task", t)) - return c.helpers.FinishTask(ctx, nil, nil, errors.Join(errors.New("malformed create ticket task"), ErrTaskNonRetryable)) + l.Error("create ticket task was nil or missing ticket request", zap.Any("create_ticket_task", t)) + return c.helpers.FinishTask(ctx, nil, t.GetAnnotations(), errors.Join(errors.New("malformed create ticket task"), ErrTaskNonRetryable)) } cc := c.helpers.ConnectorClient() @@ -41,10 +41,15 @@ func (c *createTicketTaskHandler) HandleTask(ctx context.Context) error { }) if err != nil { l.Error("failed creating ticket", zap.Error(err)) - return c.helpers.FinishTask(ctx, nil, nil, errors.Join(err, ErrTaskNonRetryable)) + return c.helpers.FinishTask(ctx, nil, t.GetAnnotations(), err) } - return c.helpers.FinishTask(ctx, resp, resp.GetAnnotations(), nil) + respAnnos := annotations.Annotations(resp.GetAnnotations()) + respAnnos.Merge(t.GetAnnotations()...) 
+ + resp.Annotations = respAnnos + + return c.helpers.FinishTask(ctx, resp, respAnnos, nil) } func newCreateTicketTaskHandler(task *v1.Task, helpers createTicketTaskHelpers) *createTicketTaskHandler { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/get_ticket.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/get_ticket.go index 09bd185..18ce23e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/get_ticket.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/get_ticket.go @@ -2,7 +2,7 @@ package c1api import ( "context" - "fmt" + "errors" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" @@ -28,24 +28,35 @@ func (c *getTicketTaskHandler) HandleTask(ctx context.Context) error { cc := c.helpers.ConnectorClient() + t := c.task.GetGetTicket() + if t == nil || t.GetTicketId() == "" { + l.Error("get ticket task was nil or missing ticket id", zap.Any("get_ticket_task", t)) + return c.helpers.FinishTask(ctx, nil, nil, errors.Join(errors.New("malformed get ticket task"), ErrTaskNonRetryable)) + } + ticket, err := cc.GetTicket(ctx, &v2.TicketsServiceGetTicketRequest{ - Id: c.task.GetId(), + Id: t.GetTicketId(), }) if err != nil { - return err + return c.helpers.FinishTask(ctx, nil, t.GetAnnotations(), err) } if ticket.GetTicket() == nil { - return fmt.Errorf("connector returned empt ticket schema") + return c.helpers.FinishTask(ctx, nil, t.GetAnnotations(), errors.Join(errors.New("connector returned empty ticket"), ErrTaskNonRetryable)) } resp := &v2.TicketsServiceGetTicketResponse{ Ticket: ticket.GetTicket(), } + respAnnos := annotations.Annotations(resp.GetAnnotations()) + respAnnos.Merge(t.GetAnnotations()...) + + resp.Annotations = respAnnos + l.Debug("GetTicket response", zap.Any("resp", resp)) - return c.helpers.FinishTask(ctx, resp, resp.GetAnnotations(), nil) + return c.helpers.FinishTask(ctx, resp, respAnnos, nil) } func newGetTicketTaskHandler(task *v1.Task, helpers getTicketTaskHelpers) *getTicketTaskHandler { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_ticket_schemas.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_ticket_schemas.go index a539d1b..d7c5c88 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_ticket_schemas.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_ticket_schemas.go @@ -60,7 +60,7 @@ func (c *listTicketSchemasTaskHandler) HandleTask(ctx context.Context) error { if err != nil { l.Error("failed listing ticket schemas", zap.Error(err)) - return c.helpers.FinishTask(ctx, nil, nil, errors.Join(err, ErrTaskNonRetryable)) + return c.helpers.FinishTask(ctx, nil, nil, err) } resp := &v2.TicketsServiceListTicketSchemasResponse{ diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go index 8dc532b..e1f2a9f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go @@ -20,6 +20,7 @@ import ( v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" "github.com/conductorone/baton-sdk/pkg/tasks" "github.com/conductorone/baton-sdk/pkg/types" + taskTypes "github.com/conductorone/baton-sdk/pkg/types/tasks" ) var ( @@ -112,7 +113,7 @@ func (c *c1ApiTaskManager) Next(ctx context.Context) (*v1.Task, time.Duration, e nextPoll := 
getNextPoll(resp.GetNextPoll().AsDuration()) l = l.With(zap.Duration("next_poll", nextPoll)) - if resp.GetTask() == nil || tasks.Is(resp.GetTask(), tasks.NoneType) { + if resp.GetTask() == nil || tasks.Is(resp.GetTask(), taskTypes.NoneType) { l.Debug("c1_api_task_manager.Next(): no tasks available") return nil, nextPoll, nil } @@ -220,34 +221,34 @@ func (c *c1ApiTaskManager) Process(ctx context.Context, task *v1.Task, cc types. // Handlers may do their work in a goroutine allowing processing to move onto the next task var handler tasks.TaskHandler switch tasks.GetType(task) { - case tasks.FullSyncType: + case taskTypes.FullSyncType: handler = newFullSyncTaskHandler(task, tHelpers) - case tasks.HelloType: + case taskTypes.HelloType: handler = newHelloTaskHandler(task, tHelpers) - case tasks.GrantType: + case taskTypes.GrantType: handler = newGrantTaskHandler(task, tHelpers) - case tasks.RevokeType: + case taskTypes.RevokeType: handler = newRevokeTaskHandler(task, tHelpers) - case tasks.CreateAccountType: + case taskTypes.CreateAccountType: handler = newCreateAccountTaskHandler(task, tHelpers) - case tasks.CreateResourceType: + case taskTypes.CreateResourceType: handler = newCreateResourceTaskHandler(task, tHelpers) - case tasks.DeleteResourceType: + case taskTypes.DeleteResourceType: handler = newDeleteResourceTaskHandler(task, tHelpers) - case tasks.RotateCredentialsType: + case taskTypes.RotateCredentialsType: handler = newRotateCredentialsTaskHandler(task, tHelpers) - case tasks.CreateTicketType: + case taskTypes.CreateTicketType: handler = newCreateTicketTaskHandler(task, tHelpers) - case tasks.ListTicketSchemasType: + case taskTypes.ListTicketSchemasType: handler = newListSchemasTaskHandler(task, tHelpers) - case tasks.GetTicketType: + case taskTypes.GetTicketType: handler = newGetTicketTaskHandler(task, tHelpers) default: return c.finishTask(ctx, task, nil, nil, errors.New("unsupported task type")) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/ticket.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/ticket.go index f0085ce..01d93a7 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/ticket.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/ticket.go @@ -7,6 +7,7 @@ import ( "sync" "time" + "github.com/conductorone/baton-sdk/pkg/types/resource" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "go.uber.org/zap" @@ -24,13 +25,14 @@ type localCreateTicket struct { } type ticketTemplate struct { - SchemaID string `json:"schema_id"` - StatusId string `json:"status_id"` - TypeId string `json:"type_id"` - DisplayName string `json:"display_name"` - Description string `json:"description"` - Labels []string `json:"labels"` - CustomFields map[string]interface{} `json:"custom_fields"` + SchemaID string `json:"schema_id"` + StatusId string `json:"status_id"` + TypeId string `json:"type_id"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + Labels []string `json:"labels"` + CustomFields map[string]interface{} `json:"custom_fields"` + RequestedForId string `json:"requested_for_id"` } func (m *localCreateTicket) loadTicketTemplate(ctx context.Context) (*ticketTemplate, error) { @@ -78,13 +80,28 @@ func (m *localCreateTicket) Process(ctx context.Context, task *v1.Task, cc types ticketRequestBody := &v2.TicketRequest{ DisplayName: template.DisplayName, Description: template.Description, - Status: &v2.TicketStatus{ - Id: template.StatusId, - }, - Type: &v2.TicketType{ + Labels: 
template.Labels, + } + + if template.TypeId != "" { + ticketRequestBody.Type = &v2.TicketType{ Id: template.TypeId, - }, - Labels: template.Labels, + } + } + + if template.StatusId != "" { + ticketRequestBody.Status = &v2.TicketStatus{ + Id: template.StatusId, + } + } + + if template.RequestedForId != "" { + rt := resource.NewResourceType("User", []v2.ResourceType_Trait{v2.ResourceType_TRAIT_USER}) + requestedUser, err := resource.NewUserResource(template.RequestedForId, rt, template.RequestedForId, []resource.UserTraitOption{}) + if err != nil { + return err + } + ticketRequestBody.RequestedFor = requestedUser } cfs := make(map[string]*v2.TicketCustomField) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go index 8a60d5c..b2071d2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go @@ -6,6 +6,7 @@ import ( v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" "github.com/conductorone/baton-sdk/pkg/types" + taskTypes "github.com/conductorone/baton-sdk/pkg/types/tasks" ) type Manager interface { @@ -17,59 +18,7 @@ type TaskHandler interface { HandleTask(ctx context.Context) error } -type TaskType uint8 - -func (tt TaskType) String() string { - switch tt { - case FullSyncType: - return "sync_full" - case GrantType: - return "grant" - case RevokeType: - return "revoke" - case HelloType: - return "hello" - case EventFeedType: - return "event_feed" - case NoneType: - return "none" - case CreateAccountType: - return "create_account" - case CreateResourceType: - return "create_resource" - case DeleteResourceType: - return "delete_resource" - case RotateCredentialsType: - return "rotate_credential" - case CreateTicketType: - return "create_ticket" - case ListTicketSchemasType: - return "list_ticket_schemas" - case GetTicketType: - return "get_ticket" - default: - return "unknown" - } -} - -const ( - UnknownType TaskType = iota - NoneType - FullSyncType - GrantType - RevokeType - HelloType - CreateAccountType - CreateResourceType - DeleteResourceType - RotateCredentialsType - EventFeedType - CreateTicketType - ListTicketSchemasType - GetTicketType -) - -func Is(task *v1.Task, target TaskType) bool { +func Is(task *v1.Task, target taskTypes.TaskType) bool { if task == nil || task.TaskType == nil { return false } @@ -77,31 +26,31 @@ func Is(task *v1.Task, target TaskType) bool { var ok bool switch target { - case FullSyncType: + case taskTypes.FullSyncType: _, ok = task.GetTaskType().(*v1.Task_SyncFull) - case GrantType: + case taskTypes.GrantType: _, ok = task.GetTaskType().(*v1.Task_Grant) - case RevokeType: + case taskTypes.RevokeType: _, ok = task.GetTaskType().(*v1.Task_Revoke) - case HelloType: + case taskTypes.HelloType: _, ok = task.GetTaskType().(*v1.Task_Hello) - case EventFeedType: + case taskTypes.EventFeedType: _, ok = task.GetTaskType().(*v1.Task_EventFeed) - case NoneType: + case taskTypes.NoneType: _, ok = task.GetTaskType().(*v1.Task_None) - case CreateAccountType: + case taskTypes.CreateAccountType: _, ok = task.GetTaskType().(*v1.Task_CreateAccount) - case CreateResourceType: + case taskTypes.CreateResourceType: _, ok = task.GetTaskType().(*v1.Task_CreateResource) - case DeleteResourceType: + case taskTypes.DeleteResourceType: _, ok = task.GetTaskType().(*v1.Task_DeleteResource) - case RotateCredentialsType: + case taskTypes.RotateCredentialsType: _, ok = task.GetTaskType().(*v1.Task_RotateCredentials) 
- case CreateTicketType: + case taskTypes.CreateTicketType: _, ok = task.GetTaskType().(*v1.Task_CreateTicketTask_) - case ListTicketSchemasType: + case taskTypes.ListTicketSchemasType: _, ok = task.GetTaskType().(*v1.Task_ListTicketSchemas) - case GetTicketType: + case taskTypes.GetTicketType: _, ok = task.GetTaskType().(*v1.Task_GetTicket) default: return false @@ -110,39 +59,39 @@ func Is(task *v1.Task, target TaskType) bool { return ok } -func GetType(task *v1.Task) TaskType { +func GetType(task *v1.Task) taskTypes.TaskType { if task == nil || task.TaskType == nil { - return UnknownType + return taskTypes.UnknownType } switch task.GetTaskType().(type) { case *v1.Task_SyncFull: - return FullSyncType + return taskTypes.FullSyncType case *v1.Task_Grant: - return GrantType + return taskTypes.GrantType case *v1.Task_Revoke: - return RevokeType + return taskTypes.RevokeType case *v1.Task_Hello: - return HelloType + return taskTypes.HelloType case *v1.Task_EventFeed: - return EventFeedType + return taskTypes.EventFeedType case *v1.Task_None: - return NoneType + return taskTypes.NoneType case *v1.Task_CreateAccount: - return CreateAccountType + return taskTypes.CreateAccountType case *v1.Task_CreateResource: - return CreateResourceType + return taskTypes.CreateResourceType case *v1.Task_DeleteResource: - return DeleteResourceType + return taskTypes.DeleteResourceType case *v1.Task_RotateCredentials: - return RotateCredentialsType + return taskTypes.RotateCredentialsType case *v1.Task_CreateTicketTask_: - return CreateTicketType + return taskTypes.CreateTicketType case *v1.Task_ListTicketSchemas: - return ListTicketSchemasType + return taskTypes.ListTicketSchemasType case *v1.Task_GetTicket: - return GetTicketType + return taskTypes.GetTicketType default: - return UnknownType + return taskTypes.UnknownType } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go new file mode 100644 index 0000000..a98d908 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go @@ -0,0 +1,74 @@ +package tasks + +type TaskType uint8 + +func (tt TaskType) String() string { + switch tt { + case FullSyncType: + return "sync_full" + case GrantType: + return "grant" + case RevokeType: + return "revoke" + case HelloType: + return "hello" + case EventFeedType: + return "event_feed" + case NoneType: + return "none" + case CreateAccountType: + return "create_account" + case CreateResourceType: + return "create_resource" + case DeleteResourceType: + return "delete_resource" + case RotateCredentialsType: + return "rotate_credential" + case CreateTicketType: + return "create_ticket" + case ListTicketSchemasType: + return "list_ticket_schemas" + case GetTicketType: + return "get_ticket" + case GetTicketSchemaType: + return "get_ticket_schema" + case ListResourceTypesType: + return "list_resource_types" + case ListResourcesType: + return "list_resources" + case ListEntitlementsType: + return "list_entitlements" + case ListGrantsType: + return "list_grants" + case GetMetadataType: + return "get_metadata" + case ListEventsType: + return "list_events" + default: + return "unknown" + } +} + +const ( + UnknownType TaskType = iota + NoneType + FullSyncType + GrantType + RevokeType + HelloType + CreateAccountType + CreateResourceType + DeleteResourceType + RotateCredentialsType + EventFeedType + CreateTicketType + ListTicketSchemasType + GetTicketType + GetTicketSchemaType + ListResourceTypesType + 
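The task-type enum now lives in its own `pkg/types/tasks` package so it can be imported without pulling in the task handlers. A small sketch of using the relocated constants; the printed names come from the `String()` method defined above.

```go
package main

import (
	"fmt"

	taskTypes "github.com/conductorone/baton-sdk/pkg/types/tasks"
)

func main() {
	// String() yields the same names the handlers log ("sync_full", "create_ticket", ...).
	for _, tt := range []taskTypes.TaskType{
		taskTypes.FullSyncType,
		taskTypes.CreateTicketType,
		taskTypes.GetTicketType,
	} {
		fmt.Println(tt.String())
	}
}
```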
ListResourcesType + ListEntitlementsType + ListGrantsType + GetMetadataType + ListEventsType +) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/ticket/custom_fields.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/ticket/custom_fields.go index cdb20bb..8d251c6 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/ticket/custom_fields.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/ticket/custom_fields.go @@ -15,6 +15,8 @@ import ( v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" ) +var ErrFieldNil = errors.New("error: field is nil") + // CustomFieldForSchemaField returns a typed custom field for a given schema field. func CustomFieldForSchemaField(id string, schema *v2.TicketSchema, value interface{}) (*v2.TicketCustomField, error) { field, ok := schema.GetCustomFields()[id] @@ -116,7 +118,7 @@ func CustomFieldForSchemaField(id string, schema *v2.TicketSchema, value interfa func GetStringValue(field *v2.TicketCustomField) (string, error) { if field == nil { - return "", errors.New("error: field is nil") + return "", ErrFieldNil } v, ok := field.GetValue().(*v2.TicketCustomField_StringValue) if !ok { @@ -127,7 +129,7 @@ func GetStringValue(field *v2.TicketCustomField) (string, error) { func GetStringsValue(field *v2.TicketCustomField) ([]string, error) { if field == nil { - return nil, errors.New("error: field is nil") + return nil, ErrFieldNil } v, ok := field.GetValue().(*v2.TicketCustomField_StringValues) if !ok { @@ -138,7 +140,7 @@ func GetStringsValue(field *v2.TicketCustomField) ([]string, error) { func GetBoolValue(field *v2.TicketCustomField) (bool, error) { if field == nil { - return false, errors.New("error: field is nil") + return false, ErrFieldNil } v, ok := field.GetValue().(*v2.TicketCustomField_BoolValue) if !ok { @@ -149,7 +151,7 @@ func GetBoolValue(field *v2.TicketCustomField) (bool, error) { func GetTimestampValue(field *v2.TicketCustomField) (time.Time, error) { if field == nil { - return time.Time{}, errors.New("error: field is nil") + return time.Time{}, ErrFieldNil } v, ok := field.GetValue().(*v2.TicketCustomField_TimestampValue) if !ok { @@ -160,7 +162,7 @@ func GetTimestampValue(field *v2.TicketCustomField) (time.Time, error) { func GetPickStringValue(field *v2.TicketCustomField) (string, error) { if field == nil { - return "", errors.New("error: field is nil") + return "", ErrFieldNil } v, ok := field.GetValue().(*v2.TicketCustomField_PickStringValue) if !ok { @@ -171,7 +173,7 @@ func GetPickStringValue(field *v2.TicketCustomField) (string, error) { func GetPickMultipleStringValues(field *v2.TicketCustomField) ([]string, error) { if field == nil { - return nil, errors.New("error: field is nil") + return nil, ErrFieldNil } v, ok := field.GetValue().(*v2.TicketCustomField_PickMultipleStringValues) if !ok { @@ -182,7 +184,7 @@ func GetPickMultipleStringValues(field *v2.TicketCustomField) ([]string, error) func GetPickObjectValue(field *v2.TicketCustomField) (*v2.TicketCustomFieldObjectValue, error) { if field == nil { - return nil, errors.New("error: field is nil") + return nil, ErrFieldNil } v, ok := field.GetValue().(*v2.TicketCustomField_PickObjectValue) if !ok { @@ -193,7 +195,7 @@ func GetPickObjectValue(field *v2.TicketCustomField) (*v2.TicketCustomFieldObjec func GetPickMultipleObjectValues(field *v2.TicketCustomField) ([]*v2.TicketCustomFieldObjectValue, error) { if field == nil { - return nil, errors.New("error: field is nil") + return nil, ErrFieldNil } v, ok := 
field.GetValue().(*v2.TicketCustomField_PickMultipleObjectValues) if !ok { @@ -242,38 +244,37 @@ func GetCustomFieldValue(field *v2.TicketCustomField) (interface{}, error) { func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Ticket) (bool, error) { l := ctxzap.Extract(ctx) - // Look for a matching status - foundMatch := false - for _, status := range schema.GetStatuses() { - if ticket.Status == nil { - l.Debug("error: invalid ticket: status is not set") - return false, nil - } - if ticket.Status.GetId() == status.GetId() { - foundMatch = true - break + // Validate the ticket status is one defined in the schema + // Ticket status is not required so if a ticket doesn't have a status + // we don't need to validate, skip the loop in this case + validTicketStatus := ticket.Status == nil + if !validTicketStatus { + for _, status := range schema.GetStatuses() { + if ticket.Status.GetId() == status.GetId() { + validTicketStatus = true + break + } } } - - if !foundMatch { + if !validTicketStatus { l.Debug("error: invalid ticket: could not find status", zap.String("status_id", ticket.Status.GetId())) return false, nil } - // Look for a matching ticket type - foundMatch = false - for _, tType := range schema.GetTypes() { - if ticket.Type == nil { - return false, nil - } - if ticket.Type.GetId() == tType.GetId() { - foundMatch = true - break + // Validate the ticket type is one defined in the schema + // Ticket type is not required so if a ticket doesn't have a type + // we don't need to validate, skip the loop in this case + validTicketType := ticket.Type == nil + if !validTicketType { + for _, tType := range schema.GetTypes() { + if ticket.Type.GetId() == tType.GetId() { + validTicketType = true + break + } } } - - if !foundMatch { - l.Debug("error: invalid ticket: could not find ticket type", zap.String("ticket_type_id", ticket.Status.GetId())) + if !validTicketType { + l.Debug("error: invalid ticket: could not find ticket type", zap.String("ticket_type_id", ticket.Type.GetId())) return false, nil } @@ -300,7 +301,7 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic return false, nil } - if cf.Required && tv.StringValue.Value == "" { + if cf.Required && tv.StringValue.GetValue() == "" { l.Debug("error: invalid ticket: string value is required but was empty", zap.String("custom_field_id", cf.Id)) return false, nil } @@ -312,7 +313,7 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic return false, nil } - if cf.Required && len(tv.StringValues.Values) == 0 { + if cf.Required && len(tv.StringValues.GetValues()) == 0 { l.Debug("error: invalid ticket: string values is required but was empty", zap.String("custom_field_id", cf.Id)) return false, nil } @@ -331,7 +332,7 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic return false, nil } - if cf.Required && tv.TimestampValue.Value == nil { + if cf.Required && tv.TimestampValue.GetValue() == nil { l.Debug("error: invalid ticket: expected timestamp value for field but was empty", zap.String("custom_field_id", cf.Id)) return false, nil } @@ -346,7 +347,13 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic ticketValue := tv.PickStringValue.GetValue() allowedValues := v.PickStringValue.GetAllowedValues() - if cf.Required && ticketValue == "" { + // String value is empty but custom field is not required, skip further validation + if !cf.Required && ticketValue == "" { + continue + } + + // Custom field is 
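Because the nil-field errors above are now a single exported sentinel, callers can branch with errors.Is instead of comparing message strings. A minimal sketch, assuming the package is imported under the `ticket` name implied by its path:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/conductorone/baton-sdk/pkg/types/ticket"
)

func main() {
	// A nil custom field now surfaces the exported sentinel.
	_, err := ticket.GetStringValue(nil)
	fmt.Println(errors.Is(err, ticket.ErrFieldNil)) // true
}
```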
required, check if string is empty + if ticketValue == "" { l.Debug("error: invalid ticket: expected string value for field but was empty", zap.String("custom_field_id", cf.Id)) return false, nil } @@ -356,7 +363,7 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic return false, nil } - foundMatch = false + foundMatch := false for _, m := range allowedValues { if m == ticketValue { foundMatch = true @@ -383,7 +390,13 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic ticketValues := tv.PickMultipleStringValues.GetValues() allowedValues := v.PickMultipleStringValues.GetAllowedValues() - if cf.Required && len(ticketValues) == 0 { + // String values are empty but custom field is not required, skip further validation + if !cf.Required && len(ticketValues) == 0 { + continue + } + + // Custom field is required so check if string values are empty + if len(ticketValues) == 0 { l.Debug("error: invalid ticket: string values is required but was empty", zap.String("custom_field_id", cf.Id)) return false, nil } @@ -421,7 +434,13 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic ticketValue := tv.PickObjectValue.GetValue() allowedValues := v.PickObjectValue.GetAllowedValues() - if cf.Required && ticketValue == nil || ticketValue.GetId() == "" { + // Object value for field is nil, but custom field is not required, skip further validation + if !cf.Required && (ticketValue == nil || ticketValue.GetId() == "") { + continue + } + + // Custom field is required so check if object value for field is nil + if ticketValue == nil || ticketValue.GetId() == "" { l.Debug("error: invalid ticket: expected object value for field but was nil", zap.String("custom_field_id", cf.Id)) return false, nil } @@ -431,7 +450,7 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic return false, nil } - foundMatch = false + foundMatch := false for _, m := range allowedValues { if m.GetId() == ticketValue.GetId() { foundMatch = true @@ -458,7 +477,13 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic ticketValues := tv.PickMultipleObjectValues.GetValues() allowedValues := v.PickMultipleObjectValues.GetAllowedValues() - if cf.Required && len(ticketValues) == 0 { + // Object values are empty but custom field is not required, skip further validation + if !cf.Required && len(ticketValues) == 0 { + continue + } + + // Custom field is required so check if object values are empty + if len(ticketValues) == 0 { l.Debug("error: invalid ticket: object values is required but was empty", zap.String("custom_field_id", cf.Id)) return false, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/authcredentials.go b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/authcredentials.go index a17624c..6addfbc 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/authcredentials.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/authcredentials.go @@ -7,6 +7,7 @@ import ( "net/http" "net/url" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "golang.org/x/oauth2" "golang.org/x/oauth2/clientcredentials" "golang.org/x/oauth2/jwt" @@ -146,7 +147,7 @@ func (o *OAuth2JWT) GetClient(ctx context.Context, options ...Option) (*http.Cli } func getHttpClient(ctx context.Context, options ...Option) (*http.Client, error) { - options = append(options, WithLogger(true, nil)) + options = append(options, WithLogger(true, ctxzap.Extract(ctx))) httpClient, err 
:= NewClient(ctx, options...) if err != nil { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go index a4be4d0..eb08761 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go @@ -21,6 +21,8 @@ func NewTransport(ctx context.Context, options ...Option) (*Transport, error) { for _, opt := range options { opt.Apply(t) } + t.userAgent = t.userAgent + " baton-sdk/" + sdk.Version + _, err := t.cycle(ctx) if err != nil { return nil, err @@ -104,7 +106,6 @@ func (t *Transport) make(ctx context.Context) (http.RoundTripper, error) { return nil, err } var rv http.RoundTripper = baseTransport - t.userAgent = t.userAgent + " baton-sdk/" + sdk.Version rv = &userAgentTripper{next: rv, userAgent: t.userAgent} return rv, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go index 2b4a1e2..0e4fa4e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go @@ -45,11 +45,17 @@ func NewBaseHttpClient(httpClient *http.Client) *BaseHttpClient { } } +// WithJSONResponse is a wrapper that marshals the returned response body into +// the provided shape. If the API should return an empty JSON body (i.e. HTTP +// status code 204 No Content), then pass a `nil` to `response`. func WithJSONResponse(response interface{}) DoOption { return func(resp *WrapperResponse) error { if !helpers.IsJSONContentType(resp.Header.Get(ContentType)) { return fmt.Errorf("unexpected content type for json response: %s", resp.Header.Get(ContentType)) } + if response == nil && len(resp.Body) == 0 { + return nil + } return json.Unmarshal(resp.Body, response) } } @@ -101,6 +107,9 @@ func WithXMLResponse(response interface{}) DoOption { if !helpers.IsXMLContentType(resp.Header.Get(ContentType)) { return fmt.Errorf("unexpected content type for xml response: %s", resp.Header.Get(ContentType)) } + if response == nil && len(resp.Body) == 0 { + return nil + } return xml.Unmarshal(resp.Body, response) } } diff --git a/vendor/github.com/deckarep/golang-set/v2/.gitignore b/vendor/github.com/deckarep/golang-set/v2/.gitignore new file mode 100644 index 0000000..4eb156d --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +.idea \ No newline at end of file diff --git a/vendor/github.com/deckarep/golang-set/v2/LICENSE b/vendor/github.com/deckarep/golang-set/v2/LICENSE new file mode 100644 index 0000000..efd4827 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/LICENSE @@ -0,0 +1,22 @@ +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2022 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom 
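As the new doc comment on WithJSONResponse notes, passing `nil` now tolerates an empty JSON body (a 204 No Content style reply). A hypothetical sketch that exercises the option directly against a WrapperResponse; the header and body values here are made up.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/conductorone/baton-sdk/pkg/uhttp"
)

func main() {
	// Simulate an empty reply with a JSON content type.
	resp := &uhttp.WrapperResponse{
		Header: http.Header{"Content-Type": []string{"application/json"}},
		Body:   nil,
	}

	// Passing nil tells the option to skip unmarshalling when the body is empty.
	err := uhttp.WithJSONResponse(nil)(resp)
	fmt.Println(err) // <nil>
}
```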
the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/deckarep/golang-set/v2/README.md b/vendor/github.com/deckarep/golang-set/v2/README.md new file mode 100644 index 0000000..921f0ce --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/README.md @@ -0,0 +1,181 @@ +![example workflow](https://github.com/deckarep/golang-set/actions/workflows/ci.yml/badge.svg) +[![Go Report Card](https://goreportcard.com/badge/github.com/deckarep/golang-set/v2)](https://goreportcard.com/report/github.com/deckarep/golang-set/v2) +[![GoDoc](https://godoc.org/github.com/deckarep/golang-set/v2?status.svg)](http://godoc.org/github.com/deckarep/golang-set/v2) + +# golang-set + +The missing `generic` set collection for the Go language. Until Go has sets built-in...use this. + +## Update 3/5/2023 +* Packaged version: `2.2.0` release includes a refactor to minimize pointer indirection, better method documentation standards and a few constructor convenience methods to increase ergonomics when appending items `Append` or creating a new set from an exist `Map`. +* supports `new generic` syntax +* Go `1.18.0` or higher +* Workflow tested on Go `1.20` + +![With Generics](new_improved.jpeg) + +Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set collection from Python. +You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say simply ignore this repository and carry-on and to the rest that find this useful please contribute in helping me make it better by contributing with suggestions or PRs. + +## Install + +Use `go get` to install this package. + +```shell +go get github.com/deckarep/golang-set/v2 +``` + +## Features + +* *NEW* [Generics](https://go.dev/doc/tutorial/generics) based implementation (requires [Go 1.18](https://go.dev/blog/go1.18beta1) or higher) +* One common *interface* to both implementations + * a **non threadsafe** implementation favoring *performance* + * a **threadsafe** implementation favoring *concurrent* use +* Feature complete set implementation modeled after [Python's set implementation](https://docs.python.org/3/library/stdtypes.html#set). +* Exhaustive unit-test and benchmark suite + +## Trusted by + +This package is trusted by many companies and thousands of open-source packages. Here are just a few sample users of this package. 
+ +* Notable projects/companies using this package + * Ethereum + * Docker + * 1Password + * Hashicorp + +## Star History + +[![Star History Chart](https://api.star-history.com/svg?repos=deckarep/golang-set&type=Date)](https://star-history.com/#deckarep/golang-set&Date) + + +## Usage + +The code below demonstrates how a Set collection can better manage data and actually minimize boilerplate and needless loops in code. This package now fully supports *generic* syntax so you are now able to instantiate a collection for any [comparable](https://flaviocopes.com/golang-comparing-values/) type object. + +What is considered comparable in Go? +* `Booleans`, `integers`, `strings`, `floats` or basically primitive types. +* `Pointers` +* `Arrays` +* `Structs` if *all of their fields* are also comparable independently + +Using this library is as simple as creating either a threadsafe or non-threadsafe set and providing a `comparable` type for instantiation of the collection. + +```go +// Syntax example, doesn't compile. +mySet := mapset.NewSet[T]() // where T is some concrete comparable type. + +// Therefore this code creates an int set +mySet := mapset.NewSet[int]() + +// Or perhaps you want a string set +mySet := mapset.NewSet[string]() + +type myStruct struct { + name string + age uint8 +} + +// Alternatively a set of structs +mySet := mapset.NewSet[myStruct]() + +// Lastly a set that can hold anything using the any or empty interface keyword: interface{}. This is effectively removes type safety. +mySet := mapset.NewSet[any]() +``` + +## Comprehensive Example + +```go +package main + +import ( + "fmt" + mapset "github.com/deckarep/golang-set/v2" +) + +func main() { + // Create a string-based set of required classes. + required := mapset.NewSet[string]() + required.Add("cooking") + required.Add("english") + required.Add("math") + required.Add("biology") + + // Create a string-based set of science classes. + sciences := mapset.NewSet[string]() + sciences.Add("biology") + sciences.Add("chemistry") + + // Create a string-based set of electives. + electives := mapset.NewSet[string]() + electives.Add("welding") + electives.Add("music") + electives.Add("automotive") + + // Create a string-based set of bonus programming classes. + bonus := mapset.NewSet[string]() + bonus.Add("beginner go") + bonus.Add("python for dummies") +} +``` + +Create a set of all unique classes. +Sets will *automatically* deduplicate the same data. + +```go + all := required + .Union(sciences) + .Union(electives) + .Union(bonus) + + fmt.Println(all) +``` + +Output: +```sh +Set{cooking, english, math, chemistry, welding, biology, music, automotive, beginner go, python for dummies} +``` + +Is cooking considered a science class? +```go +result := sciences.Contains("cooking") +fmt.Println(result) +``` + +Output: +```false +false +``` + +Show me all classes that are not science classes, since I don't enjoy science. +```go +notScience := all.Difference(sciences) +fmt.Println(notScience) +``` + +```sh +Set{ music, automotive, beginner go, python for dummies, cooking, english, math, welding } +``` + +Which science classes are also required classes? +```go +reqScience := sciences.Intersect(required) +``` + +Output: +```sh +Set{biology} +``` + +How many bonus classes do you offer? +```go +fmt.Println(bonus.Cardinality()) +``` +Output: +```sh +2 +``` + +Thanks for visiting! 
+ +-deckarep diff --git a/vendor/github.com/deckarep/golang-set/v2/iterator.go b/vendor/github.com/deckarep/golang-set/v2/iterator.go new file mode 100644 index 0000000..fc14e70 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/iterator.go @@ -0,0 +1,58 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2022 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +// Iterator defines an iterator over a Set, its C channel can be used to range over the Set's +// elements. +type Iterator[T comparable] struct { + C <-chan T + stop chan struct{} +} + +// Stop stops the Iterator, no further elements will be received on C, C will be closed. +func (i *Iterator[T]) Stop() { + // Allows for Stop() to be called multiple times + // (close() panics when called on already closed channel) + defer func() { + recover() + }() + + close(i.stop) + + // Exhaust any remaining elements. + for range i.C { + } +} + +// newIterator returns a new Iterator instance together with its item and stop channels. 
+func newIterator[T comparable]() (*Iterator[T], chan<- T, <-chan struct{}) { + itemChan := make(chan T) + stopChan := make(chan struct{}) + return &Iterator[T]{ + C: itemChan, + stop: stopChan, + }, itemChan, stopChan +} diff --git a/vendor/github.com/deckarep/golang-set/v2/new_improved.jpeg b/vendor/github.com/deckarep/golang-set/v2/new_improved.jpeg new file mode 100644 index 0000000..429752a Binary files /dev/null and b/vendor/github.com/deckarep/golang-set/v2/new_improved.jpeg differ diff --git a/vendor/github.com/deckarep/golang-set/v2/set.go b/vendor/github.com/deckarep/golang-set/v2/set.go new file mode 100644 index 0000000..292089d --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/set.go @@ -0,0 +1,255 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2022 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Package mapset implements a simple and set collection. +// Items stored within it are unordered and unique. It supports +// typical set operations: membership testing, intersection, union, +// difference, symmetric difference and cloning. +// +// Package mapset provides two implementations of the Set +// interface. The default implementation is safe for concurrent +// access, but a non-thread-safe implementation is also provided for +// programs that can benefit from the slight speed improvement and +// that can enforce mutual exclusion through other means. +package mapset + +// Set is the primary interface provided by the mapset package. It +// represents an unordered set of data and a large number of +// operations that can be applied to that set. +type Set[T comparable] interface { + // Add adds an element to the set. Returns whether + // the item was added. + Add(val T) bool + + // Append multiple elements to the set. Returns + // the number of elements added. + Append(val ...T) int + + // Cardinality returns the number of elements in the set. + Cardinality() int + + // Clear removes all elements from the set, leaving + // the empty set. + Clear() + + // Clone returns a clone of the set using the same + // implementation, duplicating all keys. + Clone() Set[T] + + // Contains returns whether the given items + // are all in the set. + Contains(val ...T) bool + + // ContainsOne returns whether the given item + // is in the set. + // + // Contains may cause the argument to escape to the heap. 
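The Iterator above pairs a receive-only channel with a Stop method that is safe to call more than once. A short sketch of stopping iteration early; the element values are arbitrary.

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set/v2"
)

func main() {
	s := mapset.NewSet[int](1, 2, 3, 4)

	it := s.Iterator()
	for v := range it.C {
		fmt.Println(v)
		if v%2 == 0 {
			// Stop closes the stop channel and drains C so the producer
			// goroutine is not left blocked; calling it again is a no-op.
			it.Stop()
			break
		}
	}
}
```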
+ // See: https://github.com/deckarep/golang-set/issues/118 + ContainsOne(val T) bool + + // ContainsAny returns whether at least one of the + // given items are in the set. + ContainsAny(val ...T) bool + + // Difference returns the difference between this set + // and other. The returned set will contain + // all elements of this set that are not also + // elements of other. + // + // Note that the argument to Difference + // must be of the same type as the receiver + // of the method. Otherwise, Difference will + // panic. + Difference(other Set[T]) Set[T] + + // Equal determines if two sets are equal to each + // other. If they have the same cardinality + // and contain the same elements, they are + // considered equal. The order in which + // the elements were added is irrelevant. + // + // Note that the argument to Equal must be + // of the same type as the receiver of the + // method. Otherwise, Equal will panic. + Equal(other Set[T]) bool + + // Intersect returns a new set containing only the elements + // that exist only in both sets. + // + // Note that the argument to Intersect + // must be of the same type as the receiver + // of the method. Otherwise, Intersect will + // panic. + Intersect(other Set[T]) Set[T] + + // IsEmpty determines if there are elements in the set. + IsEmpty() bool + + // IsProperSubset determines if every element in this set is in + // the other set but the two sets are not equal. + // + // Note that the argument to IsProperSubset + // must be of the same type as the receiver + // of the method. Otherwise, IsProperSubset + // will panic. + IsProperSubset(other Set[T]) bool + + // IsProperSuperset determines if every element in the other set + // is in this set but the two sets are not + // equal. + // + // Note that the argument to IsSuperset + // must be of the same type as the receiver + // of the method. Otherwise, IsSuperset will + // panic. + IsProperSuperset(other Set[T]) bool + + // IsSubset determines if every element in this set is in + // the other set. + // + // Note that the argument to IsSubset + // must be of the same type as the receiver + // of the method. Otherwise, IsSubset will + // panic. + IsSubset(other Set[T]) bool + + // IsSuperset determines if every element in the other set + // is in this set. + // + // Note that the argument to IsSuperset + // must be of the same type as the receiver + // of the method. Otherwise, IsSuperset will + // panic. + IsSuperset(other Set[T]) bool + + // Each iterates over elements and executes the passed func against each element. + // If passed func returns true, stop iteration at the time. + Each(func(T) bool) + + // Iter returns a channel of elements that you can + // range over. + Iter() <-chan T + + // Iterator returns an Iterator object that you can + // use to range over the set. + Iterator() *Iterator[T] + + // Remove removes a single element from the set. + Remove(i T) + + // RemoveAll removes multiple elements from the set. + RemoveAll(i ...T) + + // String provides a convenient string representation + // of the current state of the set. + String() string + + // SymmetricDifference returns a new set with all elements which are + // in either this set or the other set but not in both. + // + // Note that the argument to SymmetricDifference + // must be of the same type as the receiver + // of the method. Otherwise, SymmetricDifference + // will panic. + SymmetricDifference(other Set[T]) Set[T] + + // Union returns a new set with all elements in both sets. 
+ // + // Note that the argument to Union must be of the + // same type as the receiver of the method. + // Otherwise, Union will panic. + Union(other Set[T]) Set[T] + + // Pop removes and returns an arbitrary item from the set. + Pop() (T, bool) + + // ToSlice returns the members of the set as a slice. + ToSlice() []T + + // MarshalJSON will marshal the set into a JSON-based representation. + MarshalJSON() ([]byte, error) + + // UnmarshalJSON will unmarshal a JSON-based byte slice into a full Set datastructure. + // For this to work, set subtypes must implemented the Marshal/Unmarshal interface. + UnmarshalJSON(b []byte) error +} + +// NewSet creates and returns a new set with the given elements. +// Operations on the resulting set are thread-safe. +func NewSet[T comparable](vals ...T) Set[T] { + s := newThreadSafeSetWithSize[T](len(vals)) + for _, item := range vals { + s.Add(item) + } + return s +} + +// NewSetWithSize creates and returns a reference to an empty set with a specified +// capacity. Operations on the resulting set are thread-safe. +func NewSetWithSize[T comparable](cardinality int) Set[T] { + s := newThreadSafeSetWithSize[T](cardinality) + return s +} + +// NewThreadUnsafeSet creates and returns a new set with the given elements. +// Operations on the resulting set are not thread-safe. +func NewThreadUnsafeSet[T comparable](vals ...T) Set[T] { + s := newThreadUnsafeSetWithSize[T](len(vals)) + for _, item := range vals { + s.Add(item) + } + return s +} + +// NewThreadUnsafeSetWithSize creates and returns a reference to an empty set with +// a specified capacity. Operations on the resulting set are not thread-safe. +func NewThreadUnsafeSetWithSize[T comparable](cardinality int) Set[T] { + s := newThreadUnsafeSetWithSize[T](cardinality) + return s +} + +// NewSetFromMapKeys creates and returns a new set with the given keys of the map. +// Operations on the resulting set are thread-safe. +func NewSetFromMapKeys[T comparable, V any](val map[T]V) Set[T] { + s := NewSetWithSize[T](len(val)) + + for k := range val { + s.Add(k) + } + + return s +} + +// NewThreadUnsafeSetFromMapKeys creates and returns a new set with the given keys of the map. +// Operations on the resulting set are not thread-safe. +func NewThreadUnsafeSetFromMapKeys[T comparable, V any](val map[T]V) Set[T] { + s := NewThreadUnsafeSetWithSize[T](len(val)) + + for k := range val { + s.Add(k) + } + + return s +} diff --git a/vendor/github.com/deckarep/golang-set/v2/sorted.go b/vendor/github.com/deckarep/golang-set/v2/sorted.go new file mode 100644 index 0000000..8ee2e70 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/sorted.go @@ -0,0 +1,42 @@ +//go:build go1.21 +// +build go1.21 + +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2023 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
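A compact sketch of the constructors defined here: the default thread-safe set plus building one straight from map keys. Element values are invented; note the interface docs above warn that binary operations expect both operands to use the same implementation.

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set/v2"
)

func main() {
	// NewSet returns the thread-safe implementation; duplicates collapse.
	s := mapset.NewSet[string]("read", "write", "write")
	fmt.Println(s.Cardinality()) // 2

	// Build a set directly from the keys of an existing map.
	perms := mapset.NewSetFromMapKeys(map[string]bool{"admin": true, "read": true})
	fmt.Println(s.Intersect(perms).Contains("read")) // true
}
```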
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import ( + "cmp" + "slices" +) + +// Sorted returns a sorted slice of a set of any ordered type in ascending order. +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sorted[E cmp.Ordered](set Set[E]) []E { + s := set.ToSlice() + slices.Sort(s) + return s +} diff --git a/vendor/github.com/deckarep/golang-set/v2/threadsafe.go b/vendor/github.com/deckarep/golang-set/v2/threadsafe.go new file mode 100644 index 0000000..ad7a834 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/threadsafe.go @@ -0,0 +1,299 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2022 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import "sync" + +type threadSafeSet[T comparable] struct { + sync.RWMutex + uss threadUnsafeSet[T] +} + +func newThreadSafeSet[T comparable]() *threadSafeSet[T] { + return &threadSafeSet[T]{ + uss: newThreadUnsafeSet[T](), + } +} + +func newThreadSafeSetWithSize[T comparable](cardinality int) *threadSafeSet[T] { + return &threadSafeSet[T]{ + uss: newThreadUnsafeSetWithSize[T](cardinality), + } +} + +func (t *threadSafeSet[T]) Add(v T) bool { + t.Lock() + ret := t.uss.Add(v) + t.Unlock() + return ret +} + +func (t *threadSafeSet[T]) Append(v ...T) int { + t.Lock() + ret := t.uss.Append(v...) + t.Unlock() + return ret +} + +func (t *threadSafeSet[T]) Contains(v ...T) bool { + t.RLock() + ret := t.uss.Contains(v...) + t.RUnlock() + + return ret +} + +func (t *threadSafeSet[T]) ContainsOne(v T) bool { + t.RLock() + ret := t.uss.ContainsOne(v) + t.RUnlock() + + return ret +} + +func (t *threadSafeSet[T]) ContainsAny(v ...T) bool { + t.RLock() + ret := t.uss.ContainsAny(v...) 
+ t.RUnlock() + + return ret +} + +func (t *threadSafeSet[T]) IsEmpty() bool { + return t.Cardinality() == 0 +} + +func (t *threadSafeSet[T]) IsSubset(other Set[T]) bool { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + ret := t.uss.IsSubset(o.uss) + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) IsProperSubset(other Set[T]) bool { + o := other.(*threadSafeSet[T]) + + t.RLock() + defer t.RUnlock() + o.RLock() + defer o.RUnlock() + + return t.uss.IsProperSubset(o.uss) +} + +func (t *threadSafeSet[T]) IsSuperset(other Set[T]) bool { + return other.IsSubset(t) +} + +func (t *threadSafeSet[T]) IsProperSuperset(other Set[T]) bool { + return other.IsProperSubset(t) +} + +func (t *threadSafeSet[T]) Union(other Set[T]) Set[T] { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + unsafeUnion := t.uss.Union(o.uss).(threadUnsafeSet[T]) + ret := &threadSafeSet[T]{uss: unsafeUnion} + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) Intersect(other Set[T]) Set[T] { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + unsafeIntersection := t.uss.Intersect(o.uss).(threadUnsafeSet[T]) + ret := &threadSafeSet[T]{uss: unsafeIntersection} + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) Difference(other Set[T]) Set[T] { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + unsafeDifference := t.uss.Difference(o.uss).(threadUnsafeSet[T]) + ret := &threadSafeSet[T]{uss: unsafeDifference} + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) SymmetricDifference(other Set[T]) Set[T] { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + unsafeDifference := t.uss.SymmetricDifference(o.uss).(threadUnsafeSet[T]) + ret := &threadSafeSet[T]{uss: unsafeDifference} + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) Clear() { + t.Lock() + t.uss.Clear() + t.Unlock() +} + +func (t *threadSafeSet[T]) Remove(v T) { + t.Lock() + delete(t.uss, v) + t.Unlock() +} + +func (t *threadSafeSet[T]) RemoveAll(i ...T) { + t.Lock() + t.uss.RemoveAll(i...) 
+ t.Unlock() +} + +func (t *threadSafeSet[T]) Cardinality() int { + t.RLock() + defer t.RUnlock() + return len(t.uss) +} + +func (t *threadSafeSet[T]) Each(cb func(T) bool) { + t.RLock() + for elem := range t.uss { + if cb(elem) { + break + } + } + t.RUnlock() +} + +func (t *threadSafeSet[T]) Iter() <-chan T { + ch := make(chan T) + go func() { + t.RLock() + + for elem := range t.uss { + ch <- elem + } + close(ch) + t.RUnlock() + }() + + return ch +} + +func (t *threadSafeSet[T]) Iterator() *Iterator[T] { + iterator, ch, stopCh := newIterator[T]() + + go func() { + t.RLock() + L: + for elem := range t.uss { + select { + case <-stopCh: + break L + case ch <- elem: + } + } + close(ch) + t.RUnlock() + }() + + return iterator +} + +func (t *threadSafeSet[T]) Equal(other Set[T]) bool { + o := other.(*threadSafeSet[T]) + + t.RLock() + o.RLock() + + ret := t.uss.Equal(o.uss) + t.RUnlock() + o.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) Clone() Set[T] { + t.RLock() + + unsafeClone := t.uss.Clone().(threadUnsafeSet[T]) + ret := &threadSafeSet[T]{uss: unsafeClone} + t.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) String() string { + t.RLock() + ret := t.uss.String() + t.RUnlock() + return ret +} + +func (t *threadSafeSet[T]) Pop() (T, bool) { + t.Lock() + defer t.Unlock() + return t.uss.Pop() +} + +func (t *threadSafeSet[T]) ToSlice() []T { + keys := make([]T, 0, t.Cardinality()) + t.RLock() + for elem := range t.uss { + keys = append(keys, elem) + } + t.RUnlock() + return keys +} + +func (t *threadSafeSet[T]) MarshalJSON() ([]byte, error) { + t.RLock() + b, err := t.uss.MarshalJSON() + t.RUnlock() + + return b, err +} + +func (t *threadSafeSet[T]) UnmarshalJSON(p []byte) error { + t.RLock() + err := t.uss.UnmarshalJSON(p) + t.RUnlock() + + return err +} diff --git a/vendor/github.com/deckarep/golang-set/v2/threadunsafe.go b/vendor/github.com/deckarep/golang-set/v2/threadunsafe.go new file mode 100644 index 0000000..8b17b01 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/v2/threadunsafe.go @@ -0,0 +1,330 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 - 2022 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import ( + "encoding/json" + "fmt" + "strings" +) + +type threadUnsafeSet[T comparable] map[T]struct{} + +// Assert concrete type:threadUnsafeSet adheres to Set interface. 
+var _ Set[string] = (threadUnsafeSet[string])(nil) + +func newThreadUnsafeSet[T comparable]() threadUnsafeSet[T] { + return make(threadUnsafeSet[T]) +} + +func newThreadUnsafeSetWithSize[T comparable](cardinality int) threadUnsafeSet[T] { + return make(threadUnsafeSet[T], cardinality) +} + +func (s threadUnsafeSet[T]) Add(v T) bool { + prevLen := len(s) + s[v] = struct{}{} + return prevLen != len(s) +} + +func (s threadUnsafeSet[T]) Append(v ...T) int { + prevLen := len(s) + for _, val := range v { + (s)[val] = struct{}{} + } + return len(s) - prevLen +} + +// private version of Add which doesn't return a value +func (s threadUnsafeSet[T]) add(v T) { + s[v] = struct{}{} +} + +func (s threadUnsafeSet[T]) Cardinality() int { + return len(s) +} + +func (s threadUnsafeSet[T]) Clear() { + // Constructions like this are optimised by compiler, and replaced by + // mapclear() function, defined in + // https://github.com/golang/go/blob/29bbca5c2c1ad41b2a9747890d183b6dd3a4ace4/src/runtime/map.go#L993) + for key := range s { + delete(s, key) + } +} + +func (s threadUnsafeSet[T]) Clone() Set[T] { + clonedSet := newThreadUnsafeSetWithSize[T](s.Cardinality()) + for elem := range s { + clonedSet.add(elem) + } + return clonedSet +} + +func (s threadUnsafeSet[T]) Contains(v ...T) bool { + for _, val := range v { + if _, ok := s[val]; !ok { + return false + } + } + return true +} + +func (s threadUnsafeSet[T]) ContainsOne(v T) bool { + _, ok := s[v] + return ok +} + +func (s threadUnsafeSet[T]) ContainsAny(v ...T) bool { + for _, val := range v { + if _, ok := s[val]; ok { + return true + } + } + return false +} + +// private version of Contains for a single element v +func (s threadUnsafeSet[T]) contains(v T) (ok bool) { + _, ok = s[v] + return ok +} + +func (s threadUnsafeSet[T]) Difference(other Set[T]) Set[T] { + o := other.(threadUnsafeSet[T]) + + diff := newThreadUnsafeSet[T]() + for elem := range s { + if !o.contains(elem) { + diff.add(elem) + } + } + return diff +} + +func (s threadUnsafeSet[T]) Each(cb func(T) bool) { + for elem := range s { + if cb(elem) { + break + } + } +} + +func (s threadUnsafeSet[T]) Equal(other Set[T]) bool { + o := other.(threadUnsafeSet[T]) + + if s.Cardinality() != other.Cardinality() { + return false + } + for elem := range s { + if !o.contains(elem) { + return false + } + } + return true +} + +func (s threadUnsafeSet[T]) Intersect(other Set[T]) Set[T] { + o := other.(threadUnsafeSet[T]) + + intersection := newThreadUnsafeSet[T]() + // loop over smaller set + if s.Cardinality() < other.Cardinality() { + for elem := range s { + if o.contains(elem) { + intersection.add(elem) + } + } + } else { + for elem := range o { + if s.contains(elem) { + intersection.add(elem) + } + } + } + return intersection +} + +func (s threadUnsafeSet[T]) IsEmpty() bool { + return s.Cardinality() == 0 +} + +func (s threadUnsafeSet[T]) IsProperSubset(other Set[T]) bool { + return s.Cardinality() < other.Cardinality() && s.IsSubset(other) +} + +func (s threadUnsafeSet[T]) IsProperSuperset(other Set[T]) bool { + return s.Cardinality() > other.Cardinality() && s.IsSuperset(other) +} + +func (s threadUnsafeSet[T]) IsSubset(other Set[T]) bool { + o := other.(threadUnsafeSet[T]) + if s.Cardinality() > other.Cardinality() { + return false + } + for elem := range s { + if !o.contains(elem) { + return false + } + } + return true +} + +func (s threadUnsafeSet[T]) IsSuperset(other Set[T]) bool { + return other.IsSubset(s) +} + +func (s threadUnsafeSet[T]) Iter() <-chan T { + ch := make(chan T) + go func() { 
+ for elem := range s { + ch <- elem + } + close(ch) + }() + + return ch +} + +func (s threadUnsafeSet[T]) Iterator() *Iterator[T] { + iterator, ch, stopCh := newIterator[T]() + + go func() { + L: + for elem := range s { + select { + case <-stopCh: + break L + case ch <- elem: + } + } + close(ch) + }() + + return iterator +} + +// Pop returns a popped item in case set is not empty, or nil-value of T +// if set is already empty +func (s threadUnsafeSet[T]) Pop() (v T, ok bool) { + for item := range s { + delete(s, item) + return item, true + } + return v, false +} + +func (s threadUnsafeSet[T]) Remove(v T) { + delete(s, v) +} + +func (s threadUnsafeSet[T]) RemoveAll(i ...T) { + for _, elem := range i { + delete(s, elem) + } +} + +func (s threadUnsafeSet[T]) String() string { + items := make([]string, 0, len(s)) + + for elem := range s { + items = append(items, fmt.Sprintf("%v", elem)) + } + return fmt.Sprintf("Set{%s}", strings.Join(items, ", ")) +} + +func (s threadUnsafeSet[T]) SymmetricDifference(other Set[T]) Set[T] { + o := other.(threadUnsafeSet[T]) + + sd := newThreadUnsafeSet[T]() + for elem := range s { + if !o.contains(elem) { + sd.add(elem) + } + } + for elem := range o { + if !s.contains(elem) { + sd.add(elem) + } + } + return sd +} + +func (s threadUnsafeSet[T]) ToSlice() []T { + keys := make([]T, 0, s.Cardinality()) + for elem := range s { + keys = append(keys, elem) + } + + return keys +} + +func (s threadUnsafeSet[T]) Union(other Set[T]) Set[T] { + o := other.(threadUnsafeSet[T]) + + n := s.Cardinality() + if o.Cardinality() > n { + n = o.Cardinality() + } + unionedSet := make(threadUnsafeSet[T], n) + + for elem := range s { + unionedSet.add(elem) + } + for elem := range o { + unionedSet.add(elem) + } + return unionedSet +} + +// MarshalJSON creates a JSON array from the set, it marshals all elements +func (s threadUnsafeSet[T]) MarshalJSON() ([]byte, error) { + items := make([]string, 0, s.Cardinality()) + + for elem := range s { + b, err := json.Marshal(elem) + if err != nil { + return nil, err + } + + items = append(items, string(b)) + } + + return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil +} + +// UnmarshalJSON recreates a set from a JSON array, it only decodes +// primitive types. Numbers are decoded as json.Number. +func (s threadUnsafeSet[T]) UnmarshalJSON(b []byte) error { + var i []T + err := json.Unmarshal(b, &i) + if err != nil { + return err + } + s.Append(i...) 
+ + return nil +} diff --git a/vendor/github.com/doug-martin/goqu/v9/dialect/sqlite3/sqlite3.go b/vendor/github.com/doug-martin/goqu/v9/dialect/sqlite3/sqlite3.go new file mode 100644 index 0000000..40ddb2a --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/dialect/sqlite3/sqlite3.go @@ -0,0 +1,76 @@ +package sqlite3 + +import ( + "time" + + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" +) + +func DialectOptions() *goqu.SQLDialectOptions { + opts := goqu.DefaultDialectOptions() + + opts.SupportsReturn = false + opts.SupportsOrderByOnUpdate = true + opts.SupportsLimitOnUpdate = true + opts.SupportsOrderByOnDelete = true + opts.SupportsLimitOnDelete = true + opts.SupportsConflictUpdateWhere = false + opts.SupportsInsertIgnoreSyntax = true + opts.SupportsConflictTarget = true + opts.SupportsMultipleUpdateTables = false + opts.WrapCompoundsInParens = false + opts.SupportsDistinctOn = false + opts.SupportsWindowFunction = false + opts.SupportsLateral = false + + opts.PlaceHolderFragment = []byte("?") + opts.IncludePlaceholderNum = false + opts.QuoteRune = '`' + opts.DefaultValuesFragment = []byte("") + opts.True = []byte("1") + opts.False = []byte("0") + opts.TimeFormat = time.RFC3339Nano + opts.BooleanOperatorLookup = map[exp.BooleanOperation][]byte{ + exp.EqOp: []byte("="), + exp.NeqOp: []byte("!="), + exp.GtOp: []byte(">"), + exp.GteOp: []byte(">="), + exp.LtOp: []byte("<"), + exp.LteOp: []byte("<="), + exp.InOp: []byte("IN"), + exp.NotInOp: []byte("NOT IN"), + exp.IsOp: []byte("IS"), + exp.IsNotOp: []byte("IS NOT"), + exp.LikeOp: []byte("LIKE"), + exp.NotLikeOp: []byte("NOT LIKE"), + exp.ILikeOp: []byte("LIKE"), + exp.NotILikeOp: []byte("NOT LIKE"), + exp.RegexpLikeOp: []byte("REGEXP"), + exp.RegexpNotLikeOp: []byte("NOT REGEXP"), + exp.RegexpILikeOp: []byte("REGEXP"), + exp.RegexpNotILikeOp: []byte("NOT REGEXP"), + } + opts.UseLiteralIsBools = false + opts.BitwiseOperatorLookup = map[exp.BitwiseOperation][]byte{ + exp.BitwiseOrOp: []byte("|"), + exp.BitwiseAndOp: []byte("&"), + exp.BitwiseLeftShiftOp: []byte("<<"), + exp.BitwiseRightShiftOp: []byte(">>"), + } + opts.EscapedRunes = map[rune][]byte{ + '\'': []byte("''"), + } + opts.InsertIgnoreClause = []byte("INSERT OR IGNORE INTO ") + opts.ConflictFragment = []byte(" ON CONFLICT ") + opts.ConflictDoUpdateFragment = []byte(" DO UPDATE SET ") + opts.ConflictDoNothingFragment = []byte(" DO NOTHING ") + opts.ForUpdateFragment = []byte("") + opts.OfFragment = []byte("") + opts.NowaitFragment = []byte("") + return opts +} + +func init() { + goqu.RegisterDialect("sqlite3", DialectOptions()) +} diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md new file mode 100644 index 0000000..5b3da8f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/README.md @@ -0,0 +1,3 @@ +# Attribute + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go new file mode 100644 index 0000000..eef51eb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -0,0 +1,5 @@ +// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go new file mode 100644 index 0000000..318e42f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -0,0 +1,135 @@ +// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"bytes"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	// Encoder is a mechanism for serializing an attribute set into a specific
+	// string representation that supports caching, to avoid repeated
+	// serialization. An example could be an exporter encoding the attribute
+	// set into a wire representation.
+	Encoder interface {
+		// Encode returns the serialized encoding of the attribute set using
+		// its Iterator. This result may be cached by an attribute.Set.
+		Encode(iterator Iterator) string
+
+		// ID returns a value that is unique for each class of attribute
+		// encoder. Attribute encoders allocate these using `NewEncoderID`.
+		ID() EncoderID
+	}
+
+	// EncoderID is used to identify distinct Encoder
+	// implementations, for caching encoded results.
+	EncoderID struct {
+		value uint64
+	}
+
+	// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+	// allocations used in encoding attributes. This implementation encodes a
+	// comma-separated list of key=value, with '\'-escaping of '=', ',', and
+	// '\'.
+	defaultAttrEncoder struct {
+		// pool is a pool of attribute set builders. The buffers in this pool
+		// grow to a size that most attribute encodings will not allocate new
+		// memory.
+		pool sync.Pool // *bytes.Buffer
+	}
+)
+
+// escapeChar is used to ensure uniqueness of the attribute encoding where
+// keys or values contain either '=' or ','. Since there is no parser needed
+// for this encoding and its only requirement is to be unique, this choice is
+// arbitrary. Users will see these in some exporters (e.g., stdout), so the
+// backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+	_ Encoder = &defaultAttrEncoder{}
+
+	// encoderIDCounter is for generating IDs for other attribute encoders.
+	encoderIDCounter uint64
+
+	defaultEncoderOnce     sync.Once
+	defaultEncoderID       = NewEncoderID()
+	defaultEncoderInstance *defaultAttrEncoder
+)
+
+// NewEncoderID returns a unique attribute encoder ID. It should be called
+// once for each type of attribute encoder, preferably in init() or in a var
+// definition.
+func NewEncoderID() EncoderID { + return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} +} + +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. +// +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. +func DefaultEncoder() Encoder { + defaultEncoderOnce.Do(func() { + defaultEncoderInstance = &defaultAttrEncoder{ + pool: sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, + }, + } + }) + return defaultEncoderInstance +} + +// Encode is a part of an implementation of the AttributeEncoder interface. +func (d *defaultAttrEncoder) Encode(iter Iterator) string { + buf := d.pool.Get().(*bytes.Buffer) + defer d.pool.Put(buf) + buf.Reset() + + for iter.Next() { + i, keyValue := iter.IndexedAttribute() + if i > 0 { + _, _ = buf.WriteRune(',') + } + copyAndEscape(buf, string(keyValue.Key)) + + _, _ = buf.WriteRune('=') + + if keyValue.Value.Type() == STRING { + copyAndEscape(buf, keyValue.Value.AsString()) + } else { + _, _ = buf.WriteString(keyValue.Value.Emit()) + } + } + return buf.String() +} + +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { + return defaultEncoderID +} + +// copyAndEscape escapes `=`, `,` and its own escape character (`\`), +// making the default encoding unique. +func copyAndEscape(buf *bytes.Buffer, val string) { + for _, ch := range val { + switch ch { + case '=', ',', escapeChar: + _, _ = buf.WriteRune(escapeChar) + } + _, _ = buf.WriteRune(ch) + } +} + +// Valid returns true if this encoder ID was allocated by +// `NewEncoderID`. Invalid encoder IDs will not be cached. +func (id EncoderID) Valid() bool { + return id.value != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go new file mode 100644 index 0000000..be9cd92 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Filter supports removing certain attributes from attribute sets. When +// the filter returns true, the attribute will be kept in the filtered +// attribute set. When the filter returns false, the attribute is excluded +// from the filtered attribute set, and the attribute instead appears in +// the removed list of excluded attributes. +type Filter func(KeyValue) bool + +// NewAllowKeysFilter returns a Filter that only allows attributes with one of +// the provided keys. +// +// If keys is empty a deny-all filter is returned. +func NewAllowKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return false } + } + + allowed := make(map[Key]struct{}) + for _, k := range keys { + allowed[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := allowed[kv.Key] + return ok + } +} + +// NewDenyKeysFilter returns a Filter that only allows attributes +// that do not have one of the provided keys. +// +// If keys is empty an allow-all filter is returned. 
+func NewDenyKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return true } + } + + forbid := make(map[Key]struct{}) + for _, k := range keys { + forbid[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := forbid[kv.Key] + return !ok + } +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 0000000..f2ba89c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,150 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. +func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. +func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. +func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. +func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. +func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. 
+func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 0000000..d9a22c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,123 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int(name, value). +func (k Key) Int(v int) KeyValue { + return KeyValue{ + Key: k, + Value: IntValue(v), + } +} + +// IntSlice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- IntSlice(name, value). +func (k Key) IntSlice(v []int) KeyValue { + return KeyValue{ + Key: k, + Value: IntSliceValue(v), + } +} + +// Int64 creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64(name, value). +func (k Key) Int64(v int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64Value(v), + } +} + +// Int64Slice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64Slice(name, value). +func (k Key) Int64Slice(v []int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64SliceValue(v), + } +} + +// Float64 creates a KeyValue instance with a FLOAT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). 
+func (k Key) Float64(v float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64Value(v), + } +} + +// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64Slice(v []float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64SliceValue(v), + } +} + +// String creates a KeyValue instance with a STRING Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- String(name, value). +func (k Key) String(v string) KeyValue { + return KeyValue{ + Key: k, + Value: StringValue(v), + } +} + +// StringSlice creates a KeyValue instance with a STRINGSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- StringSlice(name, value). +func (k Key) StringSlice(v []string) KeyValue { + return KeyValue{ + Key: k, + Value: StringSliceValue(v), + } +} + +// Defined returns true for non-empty keys. +func (k Key) Defined() bool { + return len(k) != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go new file mode 100644 index 0000000..3028f9a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" +) + +// KeyValue holds a key and value pair. +type KeyValue struct { + Key Key + Value Value +} + +// Valid returns if kv is a valid OpenTelemetry attribute. +func (kv KeyValue) Valid() bool { + return kv.Key.Defined() && kv.Value.Type() != INVALID +} + +// Bool creates a KeyValue with a BOOL Value type. +func Bool(k string, v bool) KeyValue { + return Key(k).Bool(v) +} + +// BoolSlice creates a KeyValue with a BOOLSLICE Value type. +func BoolSlice(k string, v []bool) KeyValue { + return Key(k).BoolSlice(v) +} + +// Int creates a KeyValue with an INT64 Value type. +func Int(k string, v int) KeyValue { + return Key(k).Int(v) +} + +// IntSlice creates a KeyValue with an INT64SLICE Value type. +func IntSlice(k string, v []int) KeyValue { + return Key(k).IntSlice(v) +} + +// Int64 creates a KeyValue with an INT64 Value type. +func Int64(k string, v int64) KeyValue { + return Key(k).Int64(v) +} + +// Int64Slice creates a KeyValue with an INT64SLICE Value type. +func Int64Slice(k string, v []int64) KeyValue { + return Key(k).Int64Slice(v) +} + +// Float64 creates a KeyValue with a FLOAT64 Value type. +func Float64(k string, v float64) KeyValue { + return Key(k).Float64(v) +} + +// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. +func Float64Slice(k string, v []float64) KeyValue { + return Key(k).Float64Slice(v) +} + +// String creates a KeyValue with a STRING Value type. +func String(k, v string) KeyValue { + return Key(k).String(v) +} + +// StringSlice creates a KeyValue with a STRINGSLICE Value type. +func StringSlice(k string, v []string) KeyValue { + return Key(k).StringSlice(v) +} + +// Stringer creates a new key-value pair with a passed name and a string +// value generated by the passed Stringer interface. 
+func Stringer(k string, v fmt.Stringer) KeyValue {
+	return Key(k).String(v.String())
+} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go new file mode 100644 index 0000000..bff9c7f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -0,0 +1,431 @@ +// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"cmp"
+	"encoding/json"
+	"reflect"
+	"slices"
+	"sort"
+)
+
+type (
+	// Set is the representation for a distinct attribute set. It manages an
+	// immutable set of attributes, with an internal cache for storing
+	// attribute encodings.
+	//
+	// This type will remain comparable for backwards compatibility. The
+	// equivalence of Sets across versions is not guaranteed to be stable.
+	// Prior versions may find two Sets to be equal or not when compared
+	// directly (i.e. ==), but subsequent versions may not. Users should use
+	// the Equals method to ensure stable equivalence checking.
+	//
+	// Users should also use the Distinct returned from Equivalent as a map key
+	// instead of a Set directly. In addition to that type providing guarantees
+	// on stable equivalence, it may also provide performance improvements.
+	Set struct {
+		equivalent Distinct
+	}
+
+	// Distinct is a unique identifier of a Set.
+	//
+	// Distinct is designed to ensure equivalence stability: comparisons
+	// will return the same value across versions. For this reason, Distinct
+	// should always be used as a map key instead of a Set.
+	Distinct struct {
+		iface interface{}
+	}
+
+	// Sortable implements sort.Interface, used for sorting KeyValue.
+	//
+	// Deprecated: This type is no longer used. It was added as a performance
+	// optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
+	// longer supported by the module).
+	Sortable []KeyValue
+)
+
+var (
+	// keyValueType is used in computeDistinctReflect.
+	keyValueType = reflect.TypeOf(KeyValue{})
+
+	// emptySet is returned for empty attribute sets.
+	emptySet = &Set{
+		equivalent: Distinct{
+			iface: [0]KeyValue{},
+		},
+	}
+)
+
+// EmptySet returns a reference to a Set with no elements.
+//
+// This is a convenience provided for optimized calling utility.
+func EmptySet() *Set {
+	return emptySet
+}
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (d Distinct) reflectValue() reflect.Value {
+	return reflect.ValueOf(d.iface)
+}
+
+// Valid returns true if this value refers to a valid Set.
+func (d Distinct) Valid() bool {
+	return d.iface != nil
+}
+
+// Len returns the number of attributes in this set.
+func (l *Set) Len() int {
+	if l == nil || !l.equivalent.Valid() {
+		return 0
+	}
+	return l.equivalent.reflectValue().Len()
+}
+
+// Get returns the KeyValue at ordered position idx in this set.
+func (l *Set) Get(idx int) (KeyValue, bool) {
+	if l == nil || !l.equivalent.Valid() {
+		return KeyValue{}, false
+	}
+	value := l.equivalent.reflectValue()
+
+	if idx >= 0 && idx < value.Len() {
+		// Note: The Go compiler successfully avoids an allocation for
+		// the interface{} conversion here:
+		return value.Index(idx).Interface().(KeyValue), true
+	}
+
+	return KeyValue{}, false
+}
+
+// Value returns the value of a specified key in this set.
+func (l *Set) Value(k Key) (Value, bool) { + if l == nil || !l.equivalent.Valid() { + return Value{}, false + } + rValue := l.equivalent.reflectValue() + vlen := rValue.Len() + + idx := sort.Search(vlen, func(idx int) bool { + return rValue.Index(idx).Interface().(KeyValue).Key >= k + }) + if idx >= vlen { + return Value{}, false + } + keyValue := rValue.Index(idx).Interface().(KeyValue) + if k == keyValue.Key { + return keyValue.Value, true + } + return Value{}, false +} + +// HasValue tests whether a key is defined in this set. +func (l *Set) HasValue(k Key) bool { + if l == nil { + return false + } + _, ok := l.Value(k) + return ok +} + +// Iter returns an iterator for visiting the attributes in this set. +func (l *Set) Iter() Iterator { + return Iterator{ + storage: l, + idx: -1, + } +} + +// ToSlice returns the set of attributes belonging to this set, sorted, where +// keys appear no more than once. +func (l *Set) ToSlice() []KeyValue { + iter := l.Iter() + return iter.ToSlice() +} + +// Equivalent returns a value that may be used as a map key. The Distinct type +// guarantees that the result will equal the equivalent. Distinct value of any +// attribute set with the same elements as this, where sets are made unique by +// choosing the last value in the input for any given key. +func (l *Set) Equivalent() Distinct { + if l == nil || !l.equivalent.Valid() { + return emptySet.equivalent + } + return l.equivalent +} + +// Equals returns true if the argument set is equivalent to this set. +func (l *Set) Equals(o *Set) bool { + return l.Equivalent() == o.Equivalent() +} + +// Encoded returns the encoded form of this set, according to encoder. +func (l *Set) Encoded(encoder Encoder) string { + if l == nil || encoder == nil { + return "" + } + + return encoder.Encode(l.Iter()) +} + +func empty() Set { + return Set{ + equivalent: emptySet.equivalent, + } +} + +// NewSet returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// Except for empty sets, this method adds an additional allocation compared +// with calls that include a Sortable. +func NewSet(kvs ...KeyValue) Set { + s, _ := NewSetWithFiltered(kvs, nil) + return s +} + +// NewSetWithSortable returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Sortable option as a memory optimization. +// +// Deprecated: Use [NewSet] instead. +func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) + return s +} + +// NewSetWithFiltered returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Filter to include/exclude attribute keys from the +// return value. Excluded keys are returned as a slice of attribute values. +func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + // Check for empty set. + if len(kvs) == 0 { + return empty(), nil + } + + // Stable sort so the following de-duplication can implement + // last-value-wins semantics. + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) + + position := len(kvs) - 1 + offset := position - 1 + + // The requirements stated above require that the stable + // result be placed in the end of the input slice, while + // overwritten values are swapped to the beginning. + // + // De-duplicate with last-value-wins semantics. Preserve + // duplicate values at the beginning of the input slice. 
+ for ; offset >= 0; offset-- { + if kvs[offset].Key == kvs[position].Key { + continue + } + position-- + kvs[offset], kvs[position] = kvs[position], kvs[offset] + } + kvs = kvs[position:] + + if filter != nil { + if div := filteredToFront(kvs, filter); div != 0 { + return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + } + } + return Set{equivalent: computeDistinct(kvs)}, nil +} + +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead. +func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + +// filteredToFront filters slice in-place using keep function. All KeyValues that need to +// be removed are moved to the front. All KeyValues that need to be kept are +// moved (in-order) to the back. The index for the first KeyValue to be kept is +// returned. +func filteredToFront(slice []KeyValue, keep Filter) int { + n := len(slice) + j := n + for i := n - 1; i >= 0; i-- { + if keep(slice[i]) { + j-- + slice[i], slice[j] = slice[j], slice[i] + } + } + return j +} + +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. +func (l *Set) Filter(re Filter) (Set, []KeyValue) { + if re == nil { + return *l, nil + } + + // Iterate in reverse to the first attribute that will be filtered out. + n := l.Len() + first := n - 1 + for ; first >= 0; first-- { + kv, _ := l.Get(first) + if !re(kv) { + break + } + } + + // No attributes will be dropped, return the immutable Set l and nil. + if first < 0 { + return *l, nil + } + + // Copy now that we know we need to return a modified set. + // + // Do not do this in-place on the underlying storage of *Set l. Sets are + // immutable and filtering should not change this. + slice := l.ToSlice() + + // Don't re-iterate the slice if only slice[0] is filtered. + if first == 0 { + // It is safe to assume len(slice) >= 1 given we found at least one + // attribute above that needs to be filtered out. + return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + } + + // Move the filtered slice[first] to the front (preserving order). + kv := slice[first] + copy(slice[1:first+1], slice[:first]) + slice[0] = kv + + // Do not re-evaluate re(slice[first+1:]). + div := filteredToFront(slice[1:first+1], re) + 1 + return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] +} + +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. 
+func computeDistinct(kvs []KeyValue) Distinct { + iface := computeDistinctFixed(kvs) + if iface == nil { + iface = computeDistinctReflect(kvs) + } + return Distinct{ + iface: iface, + } +} + +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. +func computeDistinctFixed(kvs []KeyValue) interface{} { + switch len(kvs) { + case 1: + ptr := new([1]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 2: + ptr := new([2]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 3: + ptr := new([3]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 4: + ptr := new([4]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 5: + ptr := new([5]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 6: + ptr := new([6]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 7: + ptr := new([7]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 8: + ptr := new([8]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 9: + ptr := new([9]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 10: + ptr := new([10]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + default: + return nil + } +} + +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. +func computeDistinctReflect(kvs []KeyValue) interface{} { + at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() + for i, keyValue := range kvs { + *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue + } + return at.Interface() +} + +// MarshalJSON returns the JSON encoding of the Set. +func (l *Set) MarshalJSON() ([]byte, error) { + return json.Marshal(l.equivalent.iface) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Set. +func (l Set) MarshalLog() interface{} { + kvs := make(map[string]string) + for _, kv := range l.ToSlice() { + kvs[string(kv.Key)] = kv.Value.Emit() + } + return kvs +} + +// Len implements sort.Interface. +func (l *Sortable) Len() int { + return len(*l) +} + +// Swap implements sort.Interface. +func (l *Sortable) Swap(i, j int) { + (*l)[i], (*l)[j] = (*l)[j], (*l)[i] +} + +// Less implements sort.Interface. +func (l *Sortable) Less(i, j int) bool { + return (*l)[i].Key < (*l)[j].Key +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go new file mode 100644 index 0000000..e584b24 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. + +package attribute + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[INVALID-0] + _ = x[BOOL-1] + _ = x[INT64-2] + _ = x[FLOAT64-3] + _ = x[STRING-4] + _ = x[BOOLSLICE-5] + _ = x[INT64SLICE-6] + _ = x[FLOAT64SLICE-7] + _ = x[STRINGSLICE-8] +} + +const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" + +var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go new file mode 100644 index 0000000..9ea0ecb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -0,0 +1,271 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" +) + +//go:generate stringer -type=Type + +// Type describes the type of the data Value holds. +type Type int // nolint: revive // redefines builtin Type. + +// Value represents the value part in key-value pairs. +type Value struct { + vtype Type + numeric uint64 + stringly string + slice interface{} +} + +const ( + // INVALID is used for a Value with no value set. + INVALID Type = iota + // BOOL is a boolean Type Value. + BOOL + // INT64 is a 64-bit signed integral Type Value. + INT64 + // FLOAT64 is a 64-bit floating point Type Value. + FLOAT64 + // STRING is a string Type Value. + STRING + // BOOLSLICE is a slice of booleans Type Value. + BOOLSLICE + // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. + INT64SLICE + // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. + FLOAT64SLICE + // STRINGSLICE is a slice of strings Type Value. + STRINGSLICE +) + +// BoolValue creates a BOOL Value. +func BoolValue(v bool) Value { + return Value{ + vtype: BOOL, + numeric: internal.BoolToRaw(v), + } +} + +// BoolSliceValue creates a BOOLSLICE Value. +func BoolSliceValue(v []bool) Value { + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} +} + +// IntValue creates an INT64 Value. +func IntValue(v int) Value { + return Int64Value(int64(v)) +} + +// IntSliceValue creates an INTSLICE Value. +func IntSliceValue(v []int) Value { + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) + } + return Value{ + vtype: INT64SLICE, + slice: cp.Elem().Interface(), + } +} + +// Int64Value creates an INT64 Value. +func Int64Value(v int64) Value { + return Value{ + vtype: INT64, + numeric: internal.Int64ToRaw(v), + } +} + +// Int64SliceValue creates an INT64SLICE Value. +func Int64SliceValue(v []int64) Value { + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} +} + +// Float64Value creates a FLOAT64 Value. +func Float64Value(v float64) Value { + return Value{ + vtype: FLOAT64, + numeric: internal.Float64ToRaw(v), + } +} + +// Float64SliceValue creates a FLOAT64SLICE Value. +func Float64SliceValue(v []float64) Value { + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} +} + +// StringValue creates a STRING Value. 
+func StringValue(v string) Value { + return Value{ + vtype: STRING, + stringly: v, + } +} + +// StringSliceValue creates a STRINGSLICE Value. +func StringSliceValue(v []string) Value { + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} +} + +// Type returns a type of the Value. +func (v Value) Type() Type { + return v.vtype +} + +// AsBool returns the bool value. Make sure that the Value's type is +// BOOL. +func (v Value) AsBool() bool { + return internal.RawToBool(v.numeric) +} + +// AsBoolSlice returns the []bool value. Make sure that the Value's type is +// BOOLSLICE. +func (v Value) AsBoolSlice() []bool { + if v.vtype != BOOLSLICE { + return nil + } + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) +} + +// AsInt64 returns the int64 value. Make sure that the Value's type is +// INT64. +func (v Value) AsInt64() int64 { + return internal.RawToInt64(v.numeric) +} + +// AsInt64Slice returns the []int64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsInt64Slice() []int64 { + if v.vtype != INT64SLICE { + return nil + } + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) +} + +// AsFloat64 returns the float64 value. Make sure that the Value's +// type is FLOAT64. +func (v Value) AsFloat64() float64 { + return internal.RawToFloat64(v.numeric) +} + +// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is +// FLOAT64SLICE. +func (v Value) AsFloat64Slice() []float64 { + if v.vtype != FLOAT64SLICE { + return nil + } + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) +} + +// AsString returns the string value. Make sure that the Value's type +// is STRING. +func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. Make sure that the Value's type is +// STRINGSLICE. +func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. 
+func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: + return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) + case STRING: + return v.stringly + default: + return "unknown" + } +} + +// MarshalJSON returns the JSON encoding of the Value. +func (v Value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Type().String() + jsonVal.Value = v.AsInterface() + return json.Marshal(jsonVal) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 0000000..f32766e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package attribute provide several helper functions for some commonly used +logic of processing attributes. +*/ +package attribute // import "go.opentelemetry.io/otel/internal/attribute" + +import ( + "reflect" +) + +// BoolSliceValue converts a bool slice into an array with same elements as slice. +func BoolSliceValue(v []bool) interface{} { + var zero bool + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) + return cp.Elem().Interface() +} + +// Int64SliceValue converts an int64 slice into an array with same elements as slice. +func Int64SliceValue(v []int64) interface{} { + var zero int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) + return cp.Elem().Interface() +} + +// Float64SliceValue converts a float64 slice into an array with same elements as slice. +func Float64SliceValue(v []float64) interface{} { + var zero float64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) + return cp.Elem().Interface() +} + +// StringSliceValue converts a string slice into an array with same elements as slice. +func StringSliceValue(v []string) interface{} { + var zero string + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) + return cp.Elem().Interface() +} + +// AsBoolSlice converts a bool array into a slice into with same elements as array. +func AsBoolSlice(v interface{}) []bool { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero bool + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]bool) +} + +// AsInt64Slice converts an int64 array into a slice into with same elements as array. 
+func AsInt64Slice(v interface{}) []int64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero int64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]int64) +} + +// AsFloat64Slice converts a float64 array into a slice into with same elements as array. +func AsFloat64Slice(v interface{}) []float64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero float64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]float64) +} + +// AsStringSlice converts a string array into a slice into with same elements as array. +func AsStringSlice(v interface{}) []string { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero string + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 0000000..4259f03 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/internal" + +//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go +//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go +//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go + +//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go +//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go +//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go +//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go +//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 0000000..3e7bb3b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // 
import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func RawToBool(r uint64) bool { + return r != 0 +} + +func Int64ToRaw(i int64) uint64 { + return uint64(i) +} + +func RawToInt64(r uint64) int64 { + return int64(r) +} + +func Float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func RawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} + +func RawPtrToFloat64Ptr(r *uint64) *float64 { + return (*float64)(unsafe.Pointer(r)) +} + +func RawPtrToInt64Ptr(r *uint64) *int64 { + return (*int64)(unsafe.Pointer(r)) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md new file mode 100644 index 0000000..0cf902e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/README.md @@ -0,0 +1,3 @@ +# Metric API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go new file mode 100644 index 0000000..cf23db7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -0,0 +1,260 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Float64Observable describes a set of instruments used asynchronously to +// record float64 measurements once per collection cycle. Observations of +// these instruments are only made within a callback. +// +// Warning: Methods may be added to this interface in minor releases. +type Float64Observable interface { + Observable + + float64Observable() +} + +// Float64ObservableCounter is an instrument used to asynchronously record +// increasing float64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for +// unimplemented methods. +type Float64ObservableCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableCounter + + Float64Observable +} + +// Float64ObservableCounterConfig contains options for asynchronous counter +// instruments that record float64 values. 
+type Float64ObservableCounterConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableCounterConfig returns a new +// [Float64ObservableCounterConfig] with all opts applied. +func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig { + var config Float64ObservableCounterConfig + for _, o := range opts { + config = o.applyFloat64ObservableCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableCounterOption applies options to a +// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableCounterOption. +type Float64ObservableCounterOption interface { + applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig +} + +// Float64ObservableUpDownCounter is an instrument used to asynchronously +// record float64 measurements once per collection cycle. Observations are only +// made within a callback for this instrument. The value observed is assumed +// the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64ObservableUpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableUpDownCounter + + Float64Observable +} + +// Float64ObservableUpDownCounterConfig contains options for asynchronous +// counter instruments that record float64 values. +type Float64ObservableUpDownCounterConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableUpDownCounterConfig returns a new +// [Float64ObservableUpDownCounterConfig] with all opts applied. +func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { + var config Float64ObservableUpDownCounterConfig + for _, o := range opts { + config = o.applyFloat64ObservableUpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableUpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableUpDownCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableUpDownCounterOption applies options to a +// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableUpDownCounterOption. 
+type Float64ObservableUpDownCounterOption interface { + applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig +} + +// Float64ObservableGauge is an instrument used to asynchronously record +// instantaneous float64 measurements once per collection cycle. Observations +// are only made within a callback for this instrument. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64ObservableGauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableGauge + + Float64Observable +} + +// Float64ObservableGaugeConfig contains options for asynchronous counter +// instruments that record float64 values. +type Float64ObservableGaugeConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig] +// with all opts applied. +func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig { + var config Float64ObservableGaugeConfig + for _, o := range opts { + config = o.applyFloat64ObservableGauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableGaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableGaugeConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableGaugeOption applies options to a +// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableGaugeOption. +type Float64ObservableGaugeOption interface { + applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig +} + +// Float64Observer is a recorder of float64 measurements. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Observer + + // Observe records the float64 value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Observe(value float64, options ...ObserveOption) +} + +// Float64Callback is a function registered with a Meter that makes +// observations for a Float64Observerable instrument it is registered with. +// Calls to the Float64Observer record measurement values for the +// Float64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Float64Callbacks. 
Meaning, it should not report measurements with the same +// attributes as another Float64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Float64Callback func(context.Context, Float64Observer) error + +// Float64ObservableOption applies options to float64 Observer instruments. +type Float64ObservableOption interface { + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +type float64CallbackOpt struct { + cback Float64Callback +} + +func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +// WithFloat64Callback adds callback to be called for an instrument. +func WithFloat64Callback(callback Float64Callback) Float64ObservableOption { + return float64CallbackOpt{callback} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go new file mode 100644 index 0000000..c82ba53 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -0,0 +1,258 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Int64Observable describes a set of instruments used asynchronously to record +// int64 measurements once per collection cycle. Observations of these +// instruments are only made within a callback. +// +// Warning: Methods may be added to this interface in minor releases. +type Int64Observable interface { + Observable + + int64Observable() +} + +// Int64ObservableCounter is an instrument used to asynchronously record +// increasing int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableCounter + + Int64Observable +} + +// Int64ObservableCounterConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableCounterConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig] +// with all opts applied. 
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig { + var config Int64ObservableCounterConfig + for _, o := range opts { + config = o.applyInt64ObservableCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableCounterOption applies options to a +// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableCounterOption. +type Int64ObservableCounterOption interface { + applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig +} + +// Int64ObservableUpDownCounter is an instrument used to asynchronously record +// int64 measurements once per collection cycle. Observations are only made +// within a callback for this instrument. The value observed is assumed the to +// be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableUpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableUpDownCounter + + Int64Observable +} + +// Int64ObservableUpDownCounterConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableUpDownCounterConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableUpDownCounterConfig returns a new +// [Int64ObservableUpDownCounterConfig] with all opts applied. +func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { + var config Int64ObservableUpDownCounterConfig + for _, o := range opts { + config = o.applyInt64ObservableUpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableUpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableUpDownCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableUpDownCounterOption applies options to a +// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableUpDownCounterOption. +type Int64ObservableUpDownCounterOption interface { + applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig +} + +// Int64ObservableGauge is an instrument used to asynchronously record +// instantaneous int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. 
+// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableGauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableGauge + + Int64Observable +} + +// Int64ObservableGaugeConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableGaugeConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig] +// with all opts applied. +func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig { + var config Int64ObservableGaugeConfig + for _, o := range opts { + config = o.applyInt64ObservableGauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableGaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableGaugeConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableGaugeOption applies options to a +// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableGaugeOption. +type Int64ObservableGaugeOption interface { + applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig +} + +// Int64Observer is a recorder of int64 measurements. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Observer + + // Observe records the int64 value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Observe(value int64, options ...ObserveOption) +} + +// Int64Callback is a function registered with a Meter that makes observations +// for an Int64Observerable instrument it is registered with. Calls to the +// Int64Observer record measurement values for the Int64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Int64Callbacks. Meaning, it should not report measurements with the same +// attributes as another Int64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Int64Callback func(context.Context, Int64Observer) error + +// Int64ObservableOption applies options to int64 Observer instruments. 
+type Int64ObservableOption interface { + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption +} + +type int64CallbackOpt struct { + cback Int64Callback +} + +func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +// WithInt64Callback adds callback to be called for an instrument. +func WithInt64Callback(callback Int64Callback) Int64ObservableOption { + return int64CallbackOpt{callback} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go new file mode 100644 index 0000000..d9e3b13 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// MeterConfig contains options for Meters. +type MeterConfig struct { + instrumentationVersion string + schemaURL string + attrs attribute.Set + + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation. +func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + +// SchemaURL is the schema_url of the library providing instrumentation. +func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +} + +// MeterOption is an interface for applying Meter options. +type MeterOption interface { + // applyMeter is used to set a MeterOption value of a MeterConfig. + applyMeter(MeterConfig) MeterConfig +} + +// NewMeterConfig creates a new MeterConfig and applies +// all the given options. +func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. 
+func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 0000000..075234b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,159 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package metric provides the OpenTelemetry API used to measure metrics about +source code operation. + +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official +OpenTelemetry implementation of this API. + +All measurements made with this package are made via instruments. These +instruments are created by a [Meter] which itself is created by a +[MeterProvider]. Applications need to accept a [MeterProvider] implementation +as a starting point when instrumenting. This can be done directly, or by using +the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an +appropriately named [Meter] from the accepted [MeterProvider], instrumentation +can then be built from the [Meter]'s instruments. + +# Instruments + +Each instrument is designed to make measurements of a particular type. Broadly, +all instruments fall into two overlapping logical categories: asynchronous or +synchronous, and int64 or float64. + +All synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are used to measure the operation and performance of source +code during the source code execution. These instruments only make measurements +when the source code they instrument is run. + +All asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) are used to measure metrics outside of the execution +of source code. They are said to make "observations" via a callback function +called once every measurement collection cycle. + +Each instrument is also grouped by the value type it measures. Either int64 or +float64. The value being measured will dictate which instrument in these +categories to use. + +Outside of these two broad categories, instruments are described by the +function they are designed to serve. All Counters ([Int64Counter], +[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are +designed to measure values that never decrease in value, but instead only +incrementally increase in value. UpDownCounters ([Int64UpDownCounter], +[Float64UpDownCounter], [Int64ObservableUpDownCounter], and +[Float64ObservableUpDownCounter]) on the other hand, are designed to measure +values that can increase and decrease. When more information needs to be +conveyed about all the synchronous measurements made during a collection cycle, +a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally, +when just the most recent measurement needs to be conveyed about an +asynchronous measurement, a Gauge ([Int64ObservableGauge] and +[Float64ObservableGauge]) should be used. + +See the [OpenTelemetry documentation] for more information about instruments +and their intended use. 
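To make the two instrument categories concrete, here is a minimal, self-contained sketch (an editorial illustration under stated assumptions, not taken from the vendored sources): it acquires a Meter from the global MeterProvider referenced above and creates one synchronous counter and one asynchronous observable gauge. The meter name, instrument names, unit strings, attribute, and the 0.42 observation are placeholder values.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func instrumentExample(ctx context.Context) error {
	// Obtain a Meter from the global MeterProvider; the name is a placeholder.
	meter := otel.GetMeterProvider().Meter("example.com/instrumentation")

	// Synchronous instrument: measurements are made inline as the
	// instrumented code runs.
	requests, err := meter.Int64Counter(
		"requests.handled",
		metric.WithDescription("Number of requests handled."),
		metric.WithUnit("{request}"),
	)
	if err != nil {
		return err
	}
	requests.Add(ctx, 1, metric.WithAttributes(attribute.String("route", "/healthz")))

	// Asynchronous instrument: the observation is made in a callback that is
	// invoked once per collection cycle.
	_, err = meter.Float64ObservableGauge(
		"queue.utilization",
		metric.WithUnit("1"),
		metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
			o.Observe(0.42) // placeholder value
			return nil
		}),
	)
	return err
}

func main() {
	if err := instrumentExample(context.Background()); err != nil {
		panic(err)
	}
}

Note that without an SDK registered, the global MeterProvider is a no-op, so the calls above succeed but record nothing; the sketch only illustrates how the API surface fits together.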
+ +# Measurements + +Measurements are made by recording values and information about the values with +an instrument. How these measurements are recorded depends on the instrument. + +Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are recorded using the instrument methods directly. All +counter instruments have an Add method that is used to measure an increment +value, and all histogram instruments have a Record method to measure a data +point. + +Asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) record measurements within a callback function. The +callback is registered with the Meter which ensures the callback is called once +per collection cycle. A callback can be registered two ways: during the +instrument's creation using an option, or later using the RegisterCallback +method of the [Meter] that created the instrument. + +If the following criteria are met, an option ([WithInt64Callback] or +[WithFloat64Callback]) can be used during the asynchronous instrument's +creation to register a callback ([Int64Callback] or [Float64Callback], +respectively): + + - The measurement process is known when the instrument is created + - Only that instrument will make a measurement within the callback + - The callback never needs to be unregistered + +If the criteria are not met, use the RegisterCallback method of the [Meter] that +created the instrument to register a [Callback]. + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/metric/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/metric/embedded" + + type MeterProvider struct { + embedded.MeterProvider + // ... + } + +If an author wants the default behavior of their implementations to be a panic, +they need to embed the API interface directly. + + import "go.opentelemetry.io/otel/metric" + + type MeterProvider struct { + metric.MeterProvider + // ... + } + +This is not a recommended behavior as it could lead to publishing packages that +contain runtime panics when users update other packages that use newer versions +of [go.opentelemetry.io/otel/metric]. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author.
For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/metric/noop]: + + import "go.opentelemetry.io/otel/metric/noop" + + type MeterProvider struct { + noop.MeterProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. + +[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ +[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider +*/ +package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md new file mode 100644 index 0000000..1f6e0ef --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md @@ -0,0 +1,3 @@ +# Metric Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded) diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go new file mode 100644 index 0000000..1a9dc68 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -0,0 +1,243 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// metric API]. +// +// Implementers of the [OpenTelemetry metric API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry metric API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric +package embedded // import "go.opentelemetry.io/otel/metric/embedded" + +// MeterProvider is embedded in +// [go.opentelemetry.io/otel/metric.MeterProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type MeterProvider interface{ meterProvider() } + +// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Meter interface{ meter() } + +// Float64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Float64Observer]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Observer] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Observer interface{ float64Observer() } + +// Int64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Int64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Observer interface{ int64Observer() } + +// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Observer] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Observer interface{ observer() } + +// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Registration] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Registration] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Registration interface{ registration() } + +// Float64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Float64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Counter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Counter interface{ float64Counter() } + +// Float64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Float64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Histogram interface{ float64Histogram() } + +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + +// Float64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableCounter interface{ float64ObservableCounter() } + +// Float64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableGauge interface{ float64ObservableGauge() } + +// Float64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } + +// Float64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Float64UpDownCounter interface{ float64UpDownCounter() } + +// Int64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Int64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Counter interface{ int64Counter() } + +// Int64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Int64Histogram]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Histogram interface{ int64Histogram() } + +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + +// Int64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableCounter interface{ int64ObservableCounter() } + +// Int64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Int64ObservableGauge interface{ int64ObservableGauge() } + +// Int64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if +// you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } + +// Int64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). 
+type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go new file mode 100644 index 0000000..ea52e40 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -0,0 +1,368 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// Observable is used as a grouping mechanism for all instruments that are +// updated within a Callback. +type Observable interface { + observable() +} + +// InstrumentOption applies options to all instruments. +type InstrumentOption interface { + Int64CounterOption + Int64UpDownCounterOption + Int64HistogramOption + Int64GaugeOption + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption + + Float64CounterOption + Float64UpDownCounterOption + Float64HistogramOption + Float64GaugeOption + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +// HistogramOption applies options to histogram instruments. +type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + +type descOpt string + +func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. 
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) } + +type unitOpt string + +func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.unit = string(o) + return c +} + +// WithUnit sets the instrument unit. +// +// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. +func WithUnit(u string) InstrumentOption { return unitOpt(u) } + +// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. +// +// This option is considered "advisory", and may be ignored by API implementations. +func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } + +type bucketOpt []float64 + +func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + +func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + +// AddOption applies options to an addition measurement. See +// [MeasurementOption] for other options that can be used as an AddOption. +type AddOption interface { + applyAdd(AddConfig) AddConfig +} + +// AddConfig contains options for an addition measurement. +type AddConfig struct { + attrs attribute.Set +} + +// NewAddConfig returns a new [AddConfig] with all opts applied. +func NewAddConfig(opts []AddOption) AddConfig { + config := AddConfig{attrs: *attribute.EmptySet()} + for _, o := range opts { + config = o.applyAdd(config) + } + return config +} + +// Attributes returns the configured attribute set. +func (c AddConfig) Attributes() attribute.Set { + return c.attrs +} + +// RecordOption applies options to an addition measurement. 
See +// [MeasurementOption] for other options that can be used as a RecordOption. +type RecordOption interface { + applyRecord(RecordConfig) RecordConfig +} + +// RecordConfig contains options for a recorded measurement. +type RecordConfig struct { + attrs attribute.Set +} + +// NewRecordConfig returns a new [RecordConfig] with all opts applied. +func NewRecordConfig(opts []RecordOption) RecordConfig { + config := RecordConfig{attrs: *attribute.EmptySet()} + for _, o := range opts { + config = o.applyRecord(config) + } + return config +} + +// Attributes returns the configured attribute set. +func (c RecordConfig) Attributes() attribute.Set { + return c.attrs +} + +// ObserveOption applies options to an addition measurement. See +// [MeasurementOption] for other options that can be used as a ObserveOption. +type ObserveOption interface { + applyObserve(ObserveConfig) ObserveConfig +} + +// ObserveConfig contains options for an observed measurement. +type ObserveConfig struct { + attrs attribute.Set +} + +// NewObserveConfig returns a new [ObserveConfig] with all opts applied. +func NewObserveConfig(opts []ObserveOption) ObserveConfig { + config := ObserveConfig{attrs: *attribute.EmptySet()} + for _, o := range opts { + config = o.applyObserve(config) + } + return config +} + +// Attributes returns the configured attribute set. +func (c ObserveConfig) Attributes() attribute.Set { + return c.attrs +} + +// MeasurementOption applies options to all instrument measurement. +type MeasurementOption interface { + AddOption + RecordOption + ObserveOption +} + +type attrOpt struct { + set attribute.Set +} + +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. +func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) +} + +func (o attrOpt) applyAdd(c AddConfig) AddConfig { + switch { + case o.set.Len() == 0: + case c.attrs.Len() == 0: + c.attrs = o.set + default: + c.attrs = mergeSets(c.attrs, o.set) + } + return c +} + +func (o attrOpt) applyRecord(c RecordConfig) RecordConfig { + switch { + case o.set.Len() == 0: + case c.attrs.Len() == 0: + c.attrs = o.set + default: + c.attrs = mergeSets(c.attrs, o.set) + } + return c +} + +func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig { + switch { + case o.set.Len() == 0: + case c.attrs.Len() == 0: + c.attrs = o.set + default: + c.attrs = mergeSets(c.attrs, o.set) + } + return c +} + +// WithAttributeSet sets the attribute Set associated with a measurement is +// made with. +// +// If multiple WithAttributeSet or WithAttributes options are passed the +// attributes will be merged together in the order they are passed. Attributes +// with duplicate keys will use the last value passed. +func WithAttributeSet(attributes attribute.Set) MeasurementOption { + return attrOpt{set: attributes} +} + +// WithAttributes converts attributes into an attribute Set and sets the Set to +// be associated with a measurement. 
This is shorthand for: +// +// cp := make([]attribute.KeyValue, len(attributes)) +// copy(cp, attributes) +// WithAttributes(attribute.NewSet(cp...)) +// +// [attribute.NewSet] may modify the passed attributes so this will make a copy +// of attributes before creating a set in order to ensure this function is +// concurrent safe. This makes this option function less optimized in +// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be +// preferred for performance sensitive code. +// +// See [WithAttributeSet] for information about how multiple WithAttributes are +// merged. +func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption { + cp := make([]attribute.KeyValue, len(attributes)) + copy(cp, attributes) + return attrOpt{set: attribute.NewSet(cp...)} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go new file mode 100644 index 0000000..460b3f9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -0,0 +1,209 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// MeterProvider provides access to named Meter instances, for instrumenting +// an application or package. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type MeterProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.MeterProvider + + // Meter returns a new Meter with the provided name and configuration. + // + // A Meter should be scoped at most to a single package. The name needs to + // be unique so it does not collide with other names used by + // an application, nor other applications. To achieve this, the import path + // of the instrumentation package is recommended to be used as name. + // + // If the name is empty, then an implementation defined default name will + // be used instead. + Meter(name string, opts ...MeterOption) Meter +} + +// Meter provides access to instrument instances for recording metrics. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Meter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Meter + + // Int64Counter returns a new Int64Counter instrument identified by name + // and configured with options. The instrument is used to synchronously + // record increasing int64 measurements during a computational operation. + Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument + // identified by name and configured with options. The instrument is used + // to synchronously record int64 measurements during a computational + // operation. 
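+	// Hedged usage sketch (illustrative; mp and ctx are assumed to exist, with
+	// mp an SDK-configured MeterProvider): a package-scoped counter created
+	// through this interface might be used as follows.
+	//
+	//	meter := mp.Meter("github.com/example/mypkg")
+	//	reqs, err := meter.Int64Counter("requests",
+	//		WithDescription("handled requests"),
+	//		WithUnit("{request}"))
+	//	if err == nil {
+	//		reqs.Add(ctx, 1)
+	//	}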
+ Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by + // name and configured with options. The instrument is used to + // synchronously record the distribution of int64 measurements during a + // computational operation. + Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified + // by name and configured with options. The instrument is used to + // asynchronously record increasing int64 measurements once per a + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter + // instrument identified by name and configured with options. The + // instrument is used to asynchronously record int64 measurements once per + // a measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument + // identified by name and configured with options. The instrument is used + // to asynchronously record instantaneous int64 measurements once per a + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) + + // Float64Counter returns a new Float64Counter instrument identified by + // name and configured with options. The instrument is used to + // synchronously record increasing float64 measurements during a + // computational operation. + Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument + // identified by name and configured with options. The instrument is used + // to synchronously record float64 measurements during a computational + // operation. + Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by + // name and configured with options. 
The instrument is used to + // synchronously record the distribution of float64 measurements during a + // computational operation. + Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter + // instrument identified by name and configured with options. The + // instrument is used to asynchronously record increasing float64 + // measurements once per a measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new + // Float64ObservableUpDownCounter instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // float64 measurements once per a measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument + // identified by name and configured with options. The instrument is used + // to asynchronously record instantaneous float64 measurements once per a + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) + + // RegisterCallback registers f to be called during the collection of a + // measurement cycle. + // + // If Unregister of the returned Registration is called, f needs to be + // unregistered and not called during collection. + // + // The instruments f is registered with are the only instruments that f may + // observe values for. + // + // If no instruments are passed, f should not be registered nor called + // during collection. + // + // The function f needs to be concurrent safe. + RegisterCallback(f Callback, instruments ...Observable) (Registration, error) +} + +// Callback is a function registered with a Meter that makes observations for +// the set of instruments it is registered with. The Observer parameter is used +// to record measurement observations for these instruments. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. 
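+// Hedged sketch (illustrative; m, ctx, and queueLen are hypothetical): an
+// asynchronous instrument is observed from a registered Callback, and the
+// returned Registration is unregistered once observations should stop.
+//
+//	g, _ := m.Int64ObservableGauge("queue.length")
+//	reg, _ := m.RegisterCallback(func(ctx context.Context, o Observer) error {
+//		o.ObserveInt64(g, queueLen()) // queueLen is a hypothetical int64 helper
+//		return nil
+//	}, g)
+//	defer reg.Unregister()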
+// +// The function needs to make unique observations across all registered +// Callbacks. Meaning, it should not report measurements for an instrument with +// the same attributes as another Callback will report. +// +// The function needs to be concurrent safe. +type Callback func(context.Context, Observer) error + +// Observer records measurements for multiple instruments in a Callback. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Observer + + // ObserveFloat64 records the float64 value for obsrv. + ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. + ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) +} + +// Registration is an token representing the unique registration of a callback +// for a set of instruments with a Meter. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Registration interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Registration + + // Unregister removes the callback registration from a Meter. + // + // This method needs to be idempotent and concurrent safe. + Unregister() error +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go new file mode 100644 index 0000000..8403a4b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -0,0 +1,226 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Float64Counter is an instrument that records increasing float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Counter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Counter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr float64, options ...AddOption) +} + +// Float64CounterConfig contains options for synchronous counter instruments that +// record float64 values. +type Float64CounterConfig struct { + description string + unit string +} + +// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts +// applied. 
+func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig { + var config Float64CounterConfig + for _, o := range opts { + config = o.applyFloat64Counter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64CounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64CounterConfig) Unit() string { + return c.unit +} + +// Float64CounterOption applies options to a [Float64CounterConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64CounterOption. +type Float64CounterOption interface { + applyFloat64Counter(Float64CounterConfig) Float64CounterConfig +} + +// Float64UpDownCounter is an instrument that records increasing or decreasing +// float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64UpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64UpDownCounter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr float64, options ...AddOption) +} + +// Float64UpDownCounterConfig contains options for synchronous counter +// instruments that record float64 values. +type Float64UpDownCounterConfig struct { + description string + unit string +} + +// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig] +// with all opts applied. +func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig { + var config Float64UpDownCounterConfig + for _, o := range opts { + config = o.applyFloat64UpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64UpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64UpDownCounterConfig) Unit() string { + return c.unit +} + +// Float64UpDownCounterOption applies options to a +// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that +// can be used as a Float64UpDownCounterOption. +type Float64UpDownCounterOption interface { + applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig +} + +// Float64Histogram is an instrument that records a distribution of float64 +// values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Histogram interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Histogram + + // Record adds an additional value to the distribution. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. 
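+	// Hedged sketch (illustrative; meter, ctx, and elapsed are hypothetical):
+	// bucket advice from WithExplicitBucketBoundaries flows through
+	// Float64HistogramConfig to the SDK, which may honor or ignore it.
+	//
+	//	lat, _ := meter.Float64Histogram("request.duration",
+	//		WithUnit("s"),
+	//		WithExplicitBucketBoundaries(0.005, 0.01, 0.05, 0.1, 0.5, 1))
+	//	lat.Record(ctx, elapsed.Seconds())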
+ Record(ctx context.Context, incr float64, options ...RecordOption) +} + +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. +type Float64HistogramConfig struct { + description string + unit string + explicitBucketBoundaries []float64 +} + +// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all +// opts applied. +func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig { + var config Float64HistogramConfig + for _, o := range opts { + config = o.applyFloat64Histogram(config) + } + return config +} + +// Description returns the configured description. +func (c Float64HistogramConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64HistogramConfig) Unit() string { + return c.unit +} + +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + +// Float64HistogramOption applies options to a [Float64HistogramConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64HistogramOption. +type Float64HistogramOption interface { + applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig +} + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. 
+type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go new file mode 100644 index 0000000..783fdfb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -0,0 +1,226 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Int64Counter is an instrument that records increasing int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Counter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Counter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr int64, options ...AddOption) +} + +// Int64CounterConfig contains options for synchronous counter instruments that +// record int64 values. +type Int64CounterConfig struct { + description string + unit string +} + +// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts +// applied. +func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig { + var config Int64CounterConfig + for _, o := range opts { + config = o.applyInt64Counter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64CounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64CounterConfig) Unit() string { + return c.unit +} + +// Int64CounterOption applies options to a [Int64CounterConfig]. See +// [InstrumentOption] for other options that can be used as an +// Int64CounterOption. +type Int64CounterOption interface { + applyInt64Counter(Int64CounterConfig) Int64CounterConfig +} + +// Int64UpDownCounter is an instrument that records increasing or decreasing +// int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64UpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64UpDownCounter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr int64, options ...AddOption) +} + +// Int64UpDownCounterConfig contains options for synchronous counter +// instruments that record int64 values. +type Int64UpDownCounterConfig struct { + description string + unit string +} + +// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with +// all opts applied. 
+func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig { + var config Int64UpDownCounterConfig + for _, o := range opts { + config = o.applyInt64UpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64UpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64UpDownCounterConfig) Unit() string { + return c.unit +} + +// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig]. +// See [InstrumentOption] for other options that can be used as an +// Int64UpDownCounterOption. +type Int64UpDownCounterOption interface { + applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig +} + +// Int64Histogram is an instrument that records a distribution of int64 +// values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Histogram interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Histogram + + // Record adds an additional value to the distribution. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, incr int64, options ...RecordOption) +} + +// Int64HistogramConfig contains options for synchronous histogram instruments +// that record int64 values. +type Int64HistogramConfig struct { + description string + unit string + explicitBucketBoundaries []float64 +} + +// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts +// applied. +func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig { + var config Int64HistogramConfig + for _, o := range opts { + config = o.applyInt64Histogram(config) + } + return config +} + +// Description returns the configured description. +func (c Int64HistogramConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64HistogramConfig) Unit() string { + return c.unit +} + +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + +// Int64HistogramOption applies options to a [Int64HistogramConfig]. See +// [InstrumentOption] for other options that can be used as an +// Int64HistogramOption. +type Int64HistogramOption interface { + applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig +} + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. 
+ // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. +type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index df4ebda..7967665 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -904,6 +904,10 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { return errors.New("ssh: signature did not verify") } +func (k *skECDSAPublicKey) CryptoPublicKey() crypto.PublicKey { + return &k.PublicKey +} + type skEd25519PublicKey struct { // application is a URL-like string, typically "ssh:" for SSH. // see openssh/PROTOCOL.u2f for details. @@ -1000,6 +1004,10 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { return nil } +func (k *skEd25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + // NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, // *ecdsa.PrivateKey or any other crypto.Signer and returns a // corresponding Signer instance. ECDSA keys must use P-256, P-384 or diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index e2ae4f8..3ca9e89 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -462,6 +462,24 @@ func (p *PartialSuccessError) Error() string { // It is returned in ServerAuthError.Errors from NewServerConn. var ErrNoAuth = errors.New("ssh: no auth passed yet") +// BannerError is an error that can be returned by authentication handlers in +// ServerConfig to send a banner message to the client. 
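+// Hedged sketch (illustrative; cfg and checkPassword are hypothetical): an
+// authentication callback on ServerConfig can return a *BannerError so the
+// failure reason is shown to the client as an SSH banner.
+//
+//	cfg.PasswordCallback = func(conn ConnMetadata, pw []byte) (*Permissions, error) {
+//		if !checkPassword(conn.User(), pw) {
+//			return nil, &BannerError{
+//				Err:     errors.New("invalid credentials"),
+//				Message: "Access denied. Contact your administrator.\r\n",
+//			}
+//		}
+//		return nil, nil
+//	}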
+type BannerError struct { + Err error + Message string +} + +func (b *BannerError) Unwrap() error { + return b.Err +} + +func (b *BannerError) Error() string { + if b.Err == nil { + return b.Message + } + return b.Err.Error() +} + func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { sessionID := s.transport.getSessionID() var cache pubKeyCache @@ -734,6 +752,18 @@ userAuthLoop: config.AuthLogCallback(s, userAuthReq.Method, authErr) } + var bannerErr *BannerError + if errors.As(authErr, &bannerErr) { + if bannerErr.Message != "" { + bannerMsg := &userAuthBannerMsg{ + Message: bannerErr.Message, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + return nil, err + } + } + } + if authErr == nil { break userAuthLoop } diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6f2df28..003e649 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" + "context" "crypto/tls" "fmt" "io" @@ -26,6 +27,7 @@ import ( "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) @@ -210,12 +212,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -383,3 +379,14 @@ func validPseudoPath(v string) bool { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() + +// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. +// It's defined as an interface here to let us keep synctestGroup entirely test-only +// and not a part of non-test builds. +type synctestGroupInterface interface { + Join() + Now() time.Time + NewTimer(d time.Duration) timer + AfterFunc(d time.Duration, f func()) timer + ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index c5d0810..6c349f3 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -154,6 +154,39 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState + + // Synchronization group used for testing. + // Outside of tests, this is nil. + group synctestGroupInterface +} + +func (s *Server) markNewGoroutine() { + if s.group != nil { + s.group.Join() + } +} + +func (s *Server) now() time.Time { + if s.group != nil { + return s.group.Now() + } + return time.Now() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (s *Server) newTimer(d time.Duration) timer { + if s.group != nil { + return s.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. 
+func (s *Server) afterFunc(d time.Duration, f func()) timer { + if s.group != nil { + return s.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} } func (s *Server) initialConnRecvWindowSize() int32 { @@ -400,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() @@ -426,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -599,8 +639,8 @@ type serverConn struct { inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused + shutdownTimer timer // nil until used + idleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -649,12 +689,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline *time.Timer // nil if unused - writeDeadline *time.Timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline timer // nil if unused + writeDeadline timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -811,8 +851,9 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + sc.srv.markNewGoroutine() + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -843,6 +884,7 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -922,13 +964,13 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() loopNum := 0 @@ -1057,10 +1099,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? 
+ timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C: + case <-timer.C(): return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1425,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1639,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout > 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1661,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -2021,7 +2064,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2119,7 +2162,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2343,6 +2386,7 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2639,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) + date = rws.conn.srv.now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2761,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. 
st.onReadTimeout() @@ -2777,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(time.Now())) + st.readDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil @@ -2787,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2803,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(time.Now())) + st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go deleted file mode 100644 index 61075bd..0000000 --- a/vendor/golang.org/x/net/http2/testsync.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import ( - "context" - "sync" - "time" -) - -// testSyncHooks coordinates goroutines in tests. -// -// For example, a call to ClientConn.RoundTrip involves several goroutines, including: -// - the goroutine running RoundTrip; -// - the clientStream.doRequest goroutine, which writes the request; and -// - the clientStream.readLoop goroutine, which reads the response. -// -// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines -// are blocked waiting for some condition such as reading the Request.Body or waiting for -// flow control to become available. -// -// The testSyncHooks also manage timers and synthetic time in tests. -// This permits us to, for example, start a request and cause it to time out waiting for -// response headers without resorting to time.Sleep calls. -type testSyncHooks struct { - // active/inactive act as a mutex and condition variable. - // - // - neither chan contains a value: testSyncHooks is locked. - // - active contains a value: unlocked, and at least one goroutine is not blocked - // - inactive contains a value: unlocked, and all goroutines are blocked - active chan struct{} - inactive chan struct{} - - // goroutine counts - total int // total goroutines - condwait map[*sync.Cond]int // blocked in sync.Cond.Wait - blocked []*testBlockedGoroutine // otherwise blocked - - // fake time - now time.Time - timers []*fakeTimer - - // Transport testing: Report various events. - newclientconn func(*ClientConn) - newstream func(*clientStream) -} - -// testBlockedGoroutine is a blocked goroutine. 
-type testBlockedGoroutine struct { - f func() bool // blocked until f returns true - ch chan struct{} // closed when unblocked -} - -func newTestSyncHooks() *testSyncHooks { - h := &testSyncHooks{ - active: make(chan struct{}, 1), - inactive: make(chan struct{}, 1), - condwait: map[*sync.Cond]int{}, - } - h.inactive <- struct{}{} - h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) - return h -} - -// lock acquires the testSyncHooks mutex. -func (h *testSyncHooks) lock() { - select { - case <-h.active: - case <-h.inactive: - } -} - -// waitInactive waits for all goroutines to become inactive. -func (h *testSyncHooks) waitInactive() { - for { - <-h.inactive - if !h.unlock() { - break - } - } -} - -// unlock releases the testSyncHooks mutex. -// It reports whether any goroutines are active. -func (h *testSyncHooks) unlock() (active bool) { - // Look for a blocked goroutine which can be unblocked. - blocked := h.blocked[:0] - unblocked := false - for _, b := range h.blocked { - if !unblocked && b.f() { - unblocked = true - close(b.ch) - } else { - blocked = append(blocked, b) - } - } - h.blocked = blocked - - // Count goroutines blocked on condition variables. - condwait := 0 - for _, count := range h.condwait { - condwait += count - } - - if h.total > condwait+len(blocked) { - h.active <- struct{}{} - return true - } else { - h.inactive <- struct{}{} - return false - } -} - -// goRun starts a new goroutine. -func (h *testSyncHooks) goRun(f func()) { - h.lock() - h.total++ - h.unlock() - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - f() - }() -} - -// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. -// It waits until f returns true before proceeding. -// -// Example usage: -// -// h.blockUntil(func() bool { -// // Is the context done yet? -// select { -// case <-ctx.Done(): -// default: -// return false -// } -// return true -// }) -// // Wait for the context to become done. -// <-ctx.Done() -// -// The function f passed to blockUntil must be non-blocking and idempotent. -func (h *testSyncHooks) blockUntil(f func() bool) { - if f() { - return - } - ch := make(chan struct{}) - h.lock() - h.blocked = append(h.blocked, &testBlockedGoroutine{ - f: f, - ch: ch, - }) - h.unlock() - <-ch -} - -// broadcast is sync.Cond.Broadcast. -func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { - h.lock() - delete(h.condwait, cond) - h.unlock() - cond.Broadcast() -} - -// broadcast is sync.Cond.Wait. -func (h *testSyncHooks) condWait(cond *sync.Cond) { - h.lock() - h.condwait[cond]++ - h.unlock() -} - -// newTimer creates a new fake timer. -func (h *testSyncHooks) newTimer(d time.Duration) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - c: make(chan time.Time), - } - h.timers = append(h.timers, t) - return t -} - -// afterFunc creates a new fake AfterFunc timer. 
-func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - f: f, - } - h.timers = append(h.timers, t) - return t -} - -func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(ctx) - t := h.afterFunc(d, cancel) - return ctx, func() { - t.Stop() - cancel() - } -} - -func (h *testSyncHooks) timeUntilEvent() time.Duration { - h.lock() - defer h.unlock() - var next time.Time - for _, t := range h.timers { - if next.IsZero() || t.when.Before(next) { - next = t.when - } - } - if d := next.Sub(h.now); d > 0 { - return d - } - return 0 -} - -// advance advances time and causes synthetic timers to fire. -func (h *testSyncHooks) advance(d time.Duration) { - h.lock() - defer h.unlock() - h.now = h.now.Add(d) - timers := h.timers[:0] - for _, t := range h.timers { - t := t // remove after go.mod depends on go1.22 - t.mu.Lock() - switch { - case t.when.After(h.now): - timers = append(timers, t) - case t.when.IsZero(): - // stopped timer - default: - t.when = time.Time{} - if t.c != nil { - close(t.c) - } - if t.f != nil { - h.total++ - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - t.f() - }() - } - } - t.mu.Unlock() - } - h.timers = timers -} - -// A timer wraps a time.Timer, or a synthetic equivalent in tests. -// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. -type timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// timeTimer implements timer using real time. -type timeTimer struct { - t *time.Timer - c chan time.Time -} - -// newTimeTimer creates a new timer using real time. -func newTimeTimer(d time.Duration) timer { - ch := make(chan time.Time) - t := time.AfterFunc(d, func() { - close(ch) - }) - return &timeTimer{t, ch} -} - -// newTimeAfterFunc creates an AfterFunc timer using real time. -func newTimeAfterFunc(d time.Duration, f func()) timer { - return &timeTimer{ - t: time.AfterFunc(d, f), - } -} - -func (t timeTimer) C() <-chan time.Time { return t.c } -func (t timeTimer) Stop() bool { return t.t.Stop() } -func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } - -// fakeTimer implements timer using fake time. -type fakeTimer struct { - hooks *testSyncHooks - - mu sync.Mutex - when time.Time // when the timer will fire - c chan time.Time // closed when the timer fires; mutually exclusive with f - f func() // called when the timer fires; mutually exclusive with c -} - -func (t *fakeTimer) C() <-chan time.Time { return t.c } - -func (t *fakeTimer) Stop() bool { - t.mu.Lock() - defer t.mu.Unlock() - stopped := t.when.IsZero() - t.when = time.Time{} - return stopped -} - -func (t *fakeTimer) Reset(d time.Duration) bool { - if t.c != nil || t.f == nil { - panic("fakeTimer only supports Reset on AfterFunc timers") - } - t.mu.Lock() - defer t.mu.Unlock() - t.hooks.lock() - defer t.hooks.unlock() - active := !t.when.IsZero() - t.when = t.hooks.now.Add(d) - if !active { - t.hooks.timers = append(t.hooks.timers, t) - } - return active -} diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go new file mode 100644 index 0000000..0b1c17b --- /dev/null +++ b/vendor/golang.org/x/net/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 2fa4949..98a49c6 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -185,7 +185,45 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool - syncHooks *testSyncHooks + *transportTestHooks +} + +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) + group synctestGroupInterface +} + +func (t *Transport) markNewGoroutine() { + if t != nil && t.transportTestHooks != nil { + t.transportTestHooks.group.Join() + } +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (t *Transport) newTimer(d time.Duration) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (t *Transport) afterFunc(d time.Duration, f func()) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} +} + +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.ContextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -352,60 +390,6 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder - - syncHooks *testSyncHooks // can be nil -} - -// Hook points used for testing. -// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. -// Inside tests, see the testSyncHooks function docs. - -// goRun starts a new goroutine. -func (cc *ClientConn) goRun(f func()) { - if cc.syncHooks != nil { - cc.syncHooks.goRun(f) - return - } - go f() -} - -// condBroadcast is cc.cond.Broadcast. -func (cc *ClientConn) condBroadcast() { - if cc.syncHooks != nil { - cc.syncHooks.condBroadcast(cc.cond) - } - cc.cond.Broadcast() -} - -// condWait is cc.cond.Wait. -func (cc *ClientConn) condWait() { - if cc.syncHooks != nil { - cc.syncHooks.condWait(cc.cond) - } - cc.cond.Wait() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (cc *ClientConn) newTimer(d time.Duration) timer { - if cc.syncHooks != nil { - return cc.syncHooks.newTimer(d) - } - return newTimeTimer(d) -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. 
-func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { - if cc.syncHooks != nil { - return cc.syncHooks.afterFunc(d, f) - } - return newTimeAfterFunc(d, f) -} - -func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if cc.syncHooks != nil { - return cc.syncHooks.contextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -487,7 +471,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. - cs.cc.condBroadcast() + cs.cc.cond.Broadcast() } } @@ -497,7 +481,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.condBroadcast() + cc.cond.Broadcast() } } @@ -507,10 +491,11 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - cs.cc.goRun(func() { + go func() { + cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) - }) + }() } type stickyErrWriter struct { @@ -626,21 +611,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - var tm timer - if t.syncHooks != nil { - tm = t.syncHooks.newTimer(d) - t.syncHooks.blockUntil(func() bool { - select { - case <-tm.C(): - case <-req.Context().Done(): - default: - return false - } - return true - }) - } else { - tm = newTimeTimer(d) - } + tm := t.newTimer(d) select { case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) @@ -725,8 +696,8 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { - if t.syncHooks != nil { - return t.newClientConn(nil, singleUse, t.syncHooks) + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -736,7 +707,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse, nil) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -802,10 +773,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives(), nil) + return t.newClientConn(c, t.disableKeepAlives()) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -820,16 +791,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - syncHooks: hooks, } - if hooks != nil { - hooks.newclientconn(cc) + if t.transportTestHooks != nil { + t.markNewGoroutine() + t.transportTestHooks.newclientconn(cc) c = cc.tconn } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) - } if VerboseLogs { t.vlogf("http2: 
Transport creating client conn %p to %v", cc, c.RemoteAddr()) } @@ -893,7 +860,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo return nil, cc.werr } - cc.goRun(cc.readLoop) + // Start the idle timer after the connection is fully initialized. + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + + go cc.readLoop() return cc, nil } @@ -901,7 +874,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1144,7 +1117,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1156,9 +1130,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.condWait() + cc.cond.Wait() } - }) + }() shutdownEnterWaitStateHook() select { case <-done: @@ -1168,7 +1142,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() return ctx.Err() } @@ -1206,7 +1180,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() cc.closeConn() } @@ -1321,23 +1295,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - cc.goRun(func() { - cs.doRequest(req) - }) + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. 
See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + go cs.doRequest(req, streamf) waitDone := func() error { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.donec: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.donec: return nil @@ -1398,24 +1379,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) return err } - if streamf != nil { - streamf(cs) - } - for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1445,8 +1409,9 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + cs.cc.t.markNewGoroutine() + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1457,7 +1422,7 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. // If the returned error is StreamError, the error Code may be used in resetting the stream. -func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx @@ -1471,21 +1436,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } - var newStreamHook func(*clientStream) - if cc.syncHooks != nil { - newStreamHook = cc.syncHooks.newstream - cc.syncHooks.blockUntil(func() bool { - select { - case cc.reqHeaderMu <- struct{}{}: - <-cc.reqHeaderMu - case <-cs.reqCancel: - case <-ctx.Done(): - default: - return false - } - return true - }) - } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1510,28 +1460,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - if newStreamHook != nil { - newStreamHook(cs) - } - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. 
See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1594,7 +1524,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.newTimer(d) + timer := cc.t.newTimer(d) defer timer.Stop() respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv @@ -1603,21 +1533,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.peerClosed: - case <-respHeaderTimer: - case <-respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.peerClosed: return nil @@ -1766,7 +1681,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.condWait() + cc.cond.Wait() cc.pendingRequests-- select { case <-cs.abort: @@ -2028,7 +1943,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.condWait() + cc.cond.Wait() } } @@ -2311,7 +2226,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.condBroadcast() + cc.cond.Broadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2333,6 +2248,7 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. 
func (cc *ClientConn) readLoop() { + cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2399,7 +2315,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() } @@ -2436,7 +2352,7 @@ func (rl *clientConnReadLoop) run() error { readIdleTimeout := cc.t.ReadIdleTimeout var t timer if readIdleTimeout != 0 { - t = cc.afterFunc(readIdleTimeout, cc.healthCheck) + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -3034,7 +2950,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.condBroadcast() + cc.cond.Broadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -3089,7 +3005,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { return ConnectionError(ErrCodeFlowControl) } - cc.condBroadcast() + cc.cond.Broadcast() return nil } @@ -3133,7 +3049,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } var pingError error errc := make(chan struct{}) - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3144,20 +3061,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { close(errc) return } - }) - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-c: - case <-errc: - case <-ctx.Done(): - case <-cc.readerDone: - default: - return false - } - return true - }) - } + }() select { case <-c: return nil diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 0a242c6..f678333 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index fdcaa97..4ed2e48 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -263,6 +263,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -549,6 +550,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 93a38a9..877a62b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -502,6 +502,7 @@ const ( BPF_IMM = 0x0 BPF_IND = 0x40 BPF_JA = 0x0 + BPF_JCOND = 0xe0 BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 @@ -657,6 +658,9 @@ const ( CAN_NPROTO = 0x8 CAN_RAW = 0x1 CAN_RAW_FILTER_MAX = 0x200 + CAN_RAW_XL_VCID_RX_FILTER = 0x4 + CAN_RAW_XL_VCID_TX_PASS = 0x2 + CAN_RAW_XL_VCID_TX_SET = 0x1 CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff @@ -1339,6 +1343,7 @@ const ( F_OFD_SETLK = 0x25 F_OFD_SETLKW 
= 0x26 F_OK = 0x0 + F_SEAL_EXEC = 0x20 F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 @@ -1627,6 +1632,7 @@ const ( IP_FREEBIND = 0xf IP_HDRINCL = 0x3 IP_IPSEC_POLICY = 0x10 + IP_LOCAL_PORT_RANGE = 0x33 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1653,6 +1659,7 @@ const ( IP_PMTUDISC_OMIT = 0x5 IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 + IP_PROTOCOL = 0x34 IP_RECVERR = 0xb IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 @@ -2169,7 +2176,7 @@ const ( NFT_SECMARK_CTX_MAXLEN = 0x100 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 - NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_F_MASK = 0x7 NFT_TABLE_MAXNAMELEN = 0x100 NFT_TRACETYPE_MAX = 0x3 NFT_TUNNEL_F_MASK = 0x7 @@ -2403,6 +2410,7 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e @@ -2896,8 +2904,9 @@ const ( RWF_APPEND = 0x10 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 + RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x1f + RWF_SUPPORTED = 0x3f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -2918,7 +2927,9 @@ const ( SCHED_RESET_ON_FORK = 0x40000000 SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 + SCM_PIDFD = 0x4 SCM_RIGHTS = 0x1 + SCM_SECURITY = 0x3 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 SECCOMP_ADDFD_FLAG_SEND = 0x2 @@ -3051,6 +3062,8 @@ const ( SIOCSMIIREG = 0x8949 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SK_DIAG_BPF_STORAGE_MAX = 0x3 + SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1 SMACK_MAGIC = 0x43415d53 SMART_AUTOSAVE = 0xd2 SMART_AUTO_OFFLINE = 0xdb @@ -3071,6 +3084,8 @@ const ( SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 + SOCK_DESTROY = 0x15 + SOCK_DIAG_BY_FAMILY = 0x14 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -3260,6 +3275,7 @@ const ( TCP_MAX_WINSHIFT = 0xe TCP_MD5SIG = 0xe TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_IFINDEX = 0x2 TCP_MD5SIG_FLAG_PREFIX = 0x1 TCP_MD5SIG_MAXKEYLEN = 0x50 TCP_MSS = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 42ff8c3..e4bc0bd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dca4360..689317a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d8cae6d..1427050 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -87,6 +87,7 @@ const ( FICLONE = 0x40049409 FICLONERANGE = 0x4020940d FLUSHO = 0x1000 + FPMR_MAGIC = 0x46504d52 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 0036746..4740b83 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4605,7 +4605,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x149 + NL80211_ATTR_MAX = 0x14a NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5209,7 +5209,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1f + NL80211_FREQUENCY_ATTR_MAX = 0x20 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5703,7 +5703,7 @@ const ( NL80211_STA_FLAG_ASSOCIATED = 0x7 NL80211_STA_FLAG_AUTHENTICATED = 0x5 NL80211_STA_FLAG_AUTHORIZED = 0x1 - NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX = 0x8 NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 @@ -6001,3 +6001,34 @@ type CachestatRange struct { Off uint64 Len uint64 } + +const ( + SK_MEMINFO_RMEM_ALLOC = 0x0 + SK_MEMINFO_RCVBUF = 0x1 + SK_MEMINFO_WMEM_ALLOC = 0x2 + SK_MEMINFO_SNDBUF = 0x3 + SK_MEMINFO_FWD_ALLOC = 0x4 + SK_MEMINFO_WMEM_QUEUED = 0x5 + SK_MEMINFO_OPTMEM = 0x6 + SK_MEMINFO_BACKLOG = 0x7 + SK_MEMINFO_DROPS = 0x8 + SK_MEMINFO_VARS = 0x9 + SKNLGRP_NONE = 0x0 + SKNLGRP_INET_TCP_DESTROY = 0x1 + SKNLGRP_INET_UDP_DESTROY = 0x2 + SKNLGRP_INET6_TCP_DESTROY = 0x3 + SKNLGRP_INET6_UDP_DESTROY = 0x4 + SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0 + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1 + SK_DIAG_BPF_STORAGE_REP_NONE = 0x0 + SK_DIAG_BPF_STORAGE = 0x1 + SK_DIAG_BPF_STORAGE_NONE = 0x0 + SK_DIAG_BPF_STORAGE_PAD = 0x1 + SK_DIAG_BPF_STORAGE_MAP_ID = 0x2 + SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3 +) + +type SockDiagReq struct { + Family uint8 + Protocol uint8 +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 26be94a..6f7d2ac 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -68,6 +68,7 @@ type UserInfo10 struct { //sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo //sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation //sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree +//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum const ( // do not reorder diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 5c6035d..9f73df7 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -401,6 +401,7 @@ var ( procTransmitFile = modmswsock.NewProc("TransmitFile") procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserEnum = modnetapi32.NewProc("NetUserEnum") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") @@ -3486,6 +3487,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete return } +func 
NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) if r0 != 0 { diff --git a/vendor/modules.txt b/vendor/modules.txt index b4c90cd..49c6499 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -140,7 +140,7 @@ github.com/aws/smithy-go/waiter # github.com/benbjohnson/clock v1.3.5 ## explicit; go 1.15 github.com/benbjohnson/clock -# github.com/conductorone/baton-sdk v0.1.38 +# github.com/conductorone/baton-sdk v0.2.12 ## explicit; go 1.21 github.com/conductorone/baton-sdk/internal/connector github.com/conductorone/baton-sdk/pb/c1/c1z/v1 @@ -152,6 +152,7 @@ github.com/conductorone/baton-sdk/pb/c1/reader/v2 github.com/conductorone/baton-sdk/pb/c1/utls/v1 github.com/conductorone/baton-sdk/pkg/annotations github.com/conductorone/baton-sdk/pkg/cli +github.com/conductorone/baton-sdk/pkg/config github.com/conductorone/baton-sdk/pkg/connectorbuilder github.com/conductorone/baton-sdk/pkg/connectorrunner github.com/conductorone/baton-sdk/pkg/connectorstore @@ -162,13 +163,16 @@ github.com/conductorone/baton-sdk/pkg/dotc1z github.com/conductorone/baton-sdk/pkg/dotc1z/manager github.com/conductorone/baton-sdk/pkg/dotc1z/manager/local github.com/conductorone/baton-sdk/pkg/dotc1z/manager/s3 +github.com/conductorone/baton-sdk/pkg/field github.com/conductorone/baton-sdk/pkg/helpers github.com/conductorone/baton-sdk/pkg/logging +github.com/conductorone/baton-sdk/pkg/metrics github.com/conductorone/baton-sdk/pkg/pagination github.com/conductorone/baton-sdk/pkg/provisioner github.com/conductorone/baton-sdk/pkg/ratelimit github.com/conductorone/baton-sdk/pkg/sdk github.com/conductorone/baton-sdk/pkg/sync +github.com/conductorone/baton-sdk/pkg/sync/expand github.com/conductorone/baton-sdk/pkg/tasks github.com/conductorone/baton-sdk/pkg/tasks/c1api github.com/conductorone/baton-sdk/pkg/tasks/local @@ -176,6 +180,7 @@ github.com/conductorone/baton-sdk/pkg/types github.com/conductorone/baton-sdk/pkg/types/entitlement github.com/conductorone/baton-sdk/pkg/types/grant github.com/conductorone/baton-sdk/pkg/types/resource +github.com/conductorone/baton-sdk/pkg/types/tasks github.com/conductorone/baton-sdk/pkg/types/ticket github.com/conductorone/baton-sdk/pkg/ugrpc github.com/conductorone/baton-sdk/pkg/uhttp @@ -184,9 +189,13 @@ github.com/conductorone/baton-sdk/pkg/utls # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew +# github.com/deckarep/golang-set/v2 v2.6.0 +## explicit; go 1.18 +github.com/deckarep/golang-set/v2 # github.com/doug-martin/goqu/v9 v9.19.0 ## explicit; go 1.12 github.com/doug-martin/goqu/v9 +github.com/doug-martin/goqu/v9/dialect/sqlite3 github.com/doug-martin/goqu/v9/exec github.com/doug-martin/goqu/v9/exp github.com/doug-martin/goqu/v9/internal/errors @@ -362,6 +371,15 @@ 
github.com/tklauser/numcpus # github.com/yusufpapurcu/wmi v1.2.4 ## explicit; go 1.16 github.com/yusufpapurcu/wmi +# go.opentelemetry.io/otel v1.27.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/internal +go.opentelemetry.io/otel/internal/attribute +# go.opentelemetry.io/otel/metric v1.27.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/metric +go.opentelemetry.io/otel/metric/embedded # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr @@ -379,7 +397,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.23.0 +# golang.org/x/crypto v0.24.0 ## explicit; go 1.18 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -402,7 +420,7 @@ golang.org/x/exp/slices golang.org/x/exp/slog golang.org/x/exp/slog/internal golang.org/x/exp/slog/internal/buffer -# golang.org/x/net v0.25.0 +# golang.org/x/net v0.26.0 ## explicit; go 1.18 golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -420,7 +438,7 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.7.0 ## explicit; go 1.18 golang.org/x/sync/semaphore -# golang.org/x/sys v0.20.0 +# golang.org/x/sys v0.21.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix @@ -430,7 +448,7 @@ golang.org/x/sys/windows/svc golang.org/x/sys/windows/svc/debug golang.org/x/sys/windows/svc/eventlog golang.org/x/sys/windows/svc/mgr -# golang.org/x/text v0.15.0 +# golang.org/x/text v0.16.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/encoding
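
Note on the new NetUserEnum wrapper: the golang.org/x/sys bump above vendors a NetUserEnum syscall stub (its generated implementation appears in the zsyscall_windows.go hunk just shown, alongside the existing NetApiBufferFree and UserInfo10 declarations). Below is a rough, hypothetical usage sketch only — not part of this change — assuming level 10 / USER_INFO_10 enumeration; the FILTER_NORMAL_ACCOUNT and MAX_PREFERRED_LENGTH values are Win32 literals written out by hand because the vendored package may not export them.

//go:build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

const (
	filterNormalAccount = 0x0002     // Win32 FILTER_NORMAL_ACCOUNT (assumed literal, may not be exported)
	maxPreferredLength  = 0xFFFFFFFF // Win32 MAX_PREFERRED_LENGTH (assumed literal, may not be exported)
)

// listLocalUsers enumerates local accounts via the newly vendored
// windows.NetUserEnum wrapper and prints each account name.
func listLocalUsers() error {
	var (
		buf          *byte
		entriesRead  uint32
		totalEntries uint32
		resumeHandle uint32
	)
	// Level 10 fills the buffer with USER_INFO_10 records, mirrored by
	// the windows.UserInfo10 struct referenced in the diff above.
	err := windows.NetUserEnum(nil, 10, filterNormalAccount, &buf,
		maxPreferredLength, &entriesRead, &totalEntries, &resumeHandle)
	if err != nil {
		return err
	}
	defer windows.NetApiBufferFree(buf)

	users := unsafe.Slice((*windows.UserInfo10)(unsafe.Pointer(buf)), entriesRead)
	for _, u := range users {
		fmt.Println(windows.UTF16PtrToString(u.Name))
	}
	return nil
}

A production caller would additionally loop on resumeHandle when the API reports more data; the sketch above relies on MAX_PREFERRED_LENGTH to fetch everything in a single call.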